repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
cablehead/vanilla | vanilla/message.py | Sender.connect | python | def connect(self, recver):
r1 = recver
m1 = r1.middle
s2 = self
m2 = self.middle
r2 = self.other
r2.middle = m1
del m2.sender
del m2.recver
del m1.recver
m1.recver = weakref.ref(r2, m1.on_abandoned)
m1.recver_current = m2.recver_current
del r1.middle
del s2.middle
# if we are currently a chain, return the last recver of our chain
while True:
if getattr(r2, 'downstream', None) is None:
break
r2 = r2.downstream.other
return r2 | Rewire:
s1 -> m1 <- r1 --> s2 -> m2 <- r2
To:
s1 -> m1 <- r2 | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/message.py#L267-L296 | null | class Sender(End):
@property
def current(self):
return self.middle.sender_current
@current.setter
def current(self, value):
self.middle.sender_current = value
@property
def other(self):
return self.middle.recver()
def send(self, item, timeout=-1):
"""
Send an *item* on this pair. This will block unless our Rever is ready,
either forever or until *timeout* milliseconds.
"""
if not self.ready:
self.pause(timeout=timeout)
if isinstance(item, Exception):
return self.hub.throw_to(self.other.peak, item)
return self.hub.switch_to(self.other.peak, self.other, item)
def handover(self, recver):
assert recver.ready
recver.select()
# switch directly, as we need to pause
_, ret = recver.other.peak.switch(recver.other, None)
recver.unselect()
return ret
def clear(self):
self.send(NoState)
|
cablehead/vanilla | vanilla/message.py | Recver.recv | python | def recv(self, timeout=-1):
if self.ready:
return self.other.handover(self)
return self.pause(timeout=timeout) | Receive and item from our Sender. This will block unless our Sender is
ready, either forever or unless *timeout* milliseconds. | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/message.py#L312-L320 | [
"def pause(self, timeout=-1):\n self.select()\n try:\n _, ret = self.hub.pause(timeout=timeout)\n finally:\n self.unselect()\n return ret\n"
] | class Recver(End):
@property
def current(self):
return self.middle.recver_current
@current.setter
def current(self, value):
self.middle.recver_current = value
@property
def other(self):
return self.middle.sender()
def __iter__(self):
while True:
try:
yield self.recv()
except vanilla.exception.Halt:
break
def pipe(self, target):
"""
Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2)
"""
if callable(target):
sender, recver = self.hub.pipe()
# link the two ends in the closure with a strong reference to
# prevent them from being garbage collected if this piped section
# is used in a chain
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self)
def map(self, f):
"""
*f* is a callable that takes a single argument. All values sent on this
Recver's Sender will be passed to *f* to be transformed::
def double(i):
return i * 2
sender, recver = h.pipe()
recver.map(double)
h.spawn(sender.send, 2)
recver.recv() # returns 4
"""
@self.pipe
def recver(recver, sender):
for item in recver:
try:
sender.send(f(item))
except Exception, e:
sender.send(e)
return recver
def consume(self, f):
"""
Creates a sink which consumes all values for this Recver. *f* is a
callable which takes a single argument. All values sent on this
Recver's Sender will be passed to *f* for processing. Unlike *map*
however consume terminates this chain::
sender, recver = h.pipe
@recver.consume
def _(data):
logging.info(data)
sender.send('Hello') # logs 'Hello'
"""
@self.hub.spawn
def _():
for item in self:
# TODO: think through whether trapping for HALT here is a good
# idea
try:
f(item)
except vanilla.exception.Halt:
self.close()
break
|
cablehead/vanilla | vanilla/message.py | Recver.pipe | python | def pipe(self, target):
if callable(target):
sender, recver = self.hub.pipe()
# link the two ends in the closure with a strong reference to
# prevent them from being garbage collected if this piped section
# is used in a chain
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self) | Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2) | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/message.py#L329-L386 | null | class Recver(End):
@property
def current(self):
return self.middle.recver_current
@current.setter
def current(self, value):
self.middle.recver_current = value
@property
def other(self):
return self.middle.sender()
def recv(self, timeout=-1):
"""
Receive and item from our Sender. This will block unless our Sender is
ready, either forever or unless *timeout* milliseconds.
"""
if self.ready:
return self.other.handover(self)
return self.pause(timeout=timeout)
def __iter__(self):
while True:
try:
yield self.recv()
except vanilla.exception.Halt:
break
def map(self, f):
"""
*f* is a callable that takes a single argument. All values sent on this
Recver's Sender will be passed to *f* to be transformed::
def double(i):
return i * 2
sender, recver = h.pipe()
recver.map(double)
h.spawn(sender.send, 2)
recver.recv() # returns 4
"""
@self.pipe
def recver(recver, sender):
for item in recver:
try:
sender.send(f(item))
except Exception, e:
sender.send(e)
return recver
def consume(self, f):
"""
Creates a sink which consumes all values for this Recver. *f* is a
callable which takes a single argument. All values sent on this
Recver's Sender will be passed to *f* for processing. Unlike *map*
however consume terminates this chain::
sender, recver = h.pipe
@recver.consume
def _(data):
logging.info(data)
sender.send('Hello') # logs 'Hello'
"""
@self.hub.spawn
def _():
for item in self:
# TODO: think through whether trapping for HALT here is a good
# idea
try:
f(item)
except vanilla.exception.Halt:
self.close()
break
|
cablehead/vanilla | vanilla/message.py | Recver.map | python | def map(self, f):
@self.pipe
def recver(recver, sender):
for item in recver:
try:
sender.send(f(item))
except Exception, e:
sender.send(e)
return recver | *f* is a callable that takes a single argument. All values sent on this
Recver's Sender will be passed to *f* to be transformed::
def double(i):
return i * 2
sender, recver = h.pipe()
recver.map(double)
h.spawn(sender.send, 2)
recver.recv() # returns 4 | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/message.py#L388-L409 | [
"def pipe(self, target):\n \"\"\"\n Pipes this Recver to *target*. *target* can either be `Sender`_ (or\n `Pair`_) or a callable.\n\n If *target* is a Sender, the two pairs are rewired so that sending on\n this Recver's Sender will now be directed to the target's Recver::\n\n sender1, recver1 = h.pipe()\n sender2, recver2 = h.pipe()\n\n recver1.pipe(sender2)\n\n h.spawn(sender1.send, 'foo')\n recver2.recv() # returns 'foo'\n\n If *target* is a callable, a new `Pipe`_ will be created. This Recver\n and the new Pipe's Sender are passed to the target callable to act as\n upstream and downstream. The callable can then do any processing\n desired including filtering, mapping and duplicating packets::\n\n sender, recver = h.pipe()\n\n def pipeline(upstream, downstream):\n for i in upstream:\n if i % 2:\n downstream.send(i*2)\n\n recver = recver.pipe(pipeline)\n\n @h.spawn\n def _():\n for i in xrange(10):\n sender.send(i)\n\n recver.recv() # returns 2 (0 is filtered, so 1*2)\n recver.recv() # returns 6 (2 is filtered, so 3*2)\n \"\"\"\n if callable(target):\n sender, recver = self.hub.pipe()\n\n # link the two ends in the closure with a strong reference to\n # prevent them from being garbage collected if this piped section\n # is used in a chain\n self.downstream = sender\n sender.upstream = self\n\n @self.hub.spawn\n def _():\n try:\n target(self, sender)\n except vanilla.exception.Halt:\n sender.close()\n\n return recver\n\n else:\n return target.connect(self)\n"
] | class Recver(End):
@property
def current(self):
return self.middle.recver_current
@current.setter
def current(self, value):
self.middle.recver_current = value
@property
def other(self):
return self.middle.sender()
def recv(self, timeout=-1):
"""
Receive and item from our Sender. This will block unless our Sender is
ready, either forever or unless *timeout* milliseconds.
"""
if self.ready:
return self.other.handover(self)
return self.pause(timeout=timeout)
def __iter__(self):
while True:
try:
yield self.recv()
except vanilla.exception.Halt:
break
def pipe(self, target):
"""
Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2)
"""
if callable(target):
sender, recver = self.hub.pipe()
# link the two ends in the closure with a strong reference to
# prevent them from being garbage collected if this piped section
# is used in a chain
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self)
def consume(self, f):
"""
Creates a sink which consumes all values for this Recver. *f* is a
callable which takes a single argument. All values sent on this
Recver's Sender will be passed to *f* for processing. Unlike *map*
however consume terminates this chain::
sender, recver = h.pipe
@recver.consume
def _(data):
logging.info(data)
sender.send('Hello') # logs 'Hello'
"""
@self.hub.spawn
def _():
for item in self:
# TODO: think through whether trapping for HALT here is a good
# idea
try:
f(item)
except vanilla.exception.Halt:
self.close()
break
|
cablehead/vanilla | vanilla/message.py | Recver.consume | python | def consume(self, f):
@self.hub.spawn
def _():
for item in self:
# TODO: think through whether trapping for HALT here is a good
# idea
try:
f(item)
except vanilla.exception.Halt:
self.close()
break | Creates a sink which consumes all values for this Recver. *f* is a
callable which takes a single argument. All values sent on this
Recver's Sender will be passed to *f* for processing. Unlike *map*
however consume terminates this chain::
sender, recver = h.pipe
@recver.consume
def _(data):
logging.info(data)
sender.send('Hello') # logs 'Hello' | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/message.py#L411-L435 | null | class Recver(End):
@property
def current(self):
return self.middle.recver_current
@current.setter
def current(self, value):
self.middle.recver_current = value
@property
def other(self):
return self.middle.sender()
def recv(self, timeout=-1):
"""
Receive and item from our Sender. This will block unless our Sender is
ready, either forever or unless *timeout* milliseconds.
"""
if self.ready:
return self.other.handover(self)
return self.pause(timeout=timeout)
def __iter__(self):
while True:
try:
yield self.recv()
except vanilla.exception.Halt:
break
def pipe(self, target):
"""
Pipes this Recver to *target*. *target* can either be `Sender`_ (or
`Pair`_) or a callable.
If *target* is a Sender, the two pairs are rewired so that sending on
this Recver's Sender will now be directed to the target's Recver::
sender1, recver1 = h.pipe()
sender2, recver2 = h.pipe()
recver1.pipe(sender2)
h.spawn(sender1.send, 'foo')
recver2.recv() # returns 'foo'
If *target* is a callable, a new `Pipe`_ will be created. This Recver
and the new Pipe's Sender are passed to the target callable to act as
upstream and downstream. The callable can then do any processing
desired including filtering, mapping and duplicating packets::
sender, recver = h.pipe()
def pipeline(upstream, downstream):
for i in upstream:
if i % 2:
downstream.send(i*2)
recver = recver.pipe(pipeline)
@h.spawn
def _():
for i in xrange(10):
sender.send(i)
recver.recv() # returns 2 (0 is filtered, so 1*2)
recver.recv() # returns 6 (2 is filtered, so 3*2)
"""
if callable(target):
sender, recver = self.hub.pipe()
# link the two ends in the closure with a strong reference to
# prevent them from being garbage collected if this piped section
# is used in a chain
self.downstream = sender
sender.upstream = self
@self.hub.spawn
def _():
try:
target(self, sender)
except vanilla.exception.Halt:
sender.close()
return recver
else:
return target.connect(self)
def map(self, f):
"""
*f* is a callable that takes a single argument. All values sent on this
Recver's Sender will be passed to *f* to be transformed::
def double(i):
return i * 2
sender, recver = h.pipe()
recver.map(double)
h.spawn(sender.send, 2)
recver.recv() # returns 4
"""
@self.pipe
def recver(recver, sender):
for item in recver:
try:
sender.send(f(item))
except Exception, e:
sender.send(e)
return recver
|
cablehead/vanilla | vanilla/core.py | Hub.producer | python | def producer(self, f):
sender, recver = self.pipe()
self.spawn(f, sender)
return recver | Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2 | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L116-L135 | [
"def pipe(self):\n \"\"\"\n Returns a `Pipe`_ `Pair`_.\n \"\"\"\n return vanilla.message.Pipe(self)\n",
"def spawn(self, f, *a):\n \"\"\"\n Schedules a new green thread to be created to run *f(\\*a)* on the next\n available tick::\n\n def echo(pipe, s):\n pipe.send(s)\n\n p = h.pipe()\n h.spawn(echo, p, 'hi')\n p.recv() # returns 'hi'\n \"\"\"\n self.ready.append((f, a))\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.pulse | python | def pulse(self, ms, item=True):
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _ | Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L149-L171 | [
"def producer(self, f):\n \"\"\"\n Convenience to create a `Pipe`_. *f* is a callable that takes the\n `Sender`_ end of this Pipe and the corresponding `Recver`_ is\n returned::\n\n def counter(sender):\n i = 0\n while True:\n i += 1\n sender.send(i)\n\n recver = h.producer(counter)\n\n recver.recv() # returns 1\n recver.recv() # returns 2\n \"\"\"\n sender, recver = self.pipe()\n self.spawn(f, sender)\n return recver\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.channel | python | def channel(self, size=-1):
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer())) | ::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between. | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L200-L217 | [
"def dealer(self):\n \"\"\"\n Returns a `Dealer`_ `Pair`_.\n \"\"\"\n return vanilla.message.Dealer(self)\n",
"def router(self):\n \"\"\"\n Returns a `Router`_ `Pair`_.\n \"\"\"\n return vanilla.message.Router(self)\n",
"def queue(self, size):\n \"\"\"\n Returns a `Queue`_ `Pair`_.\n \"\"\"\n return vanilla.message.Queue(self, size)\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.serialize | python | def serialize(self, f):
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _ | Decorator to serialize access to a callable *f* | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L219-L238 | [
"def router(self):\n \"\"\"\n Returns a `Router`_ `Pair`_.\n \"\"\"\n return vanilla.message.Router(self)\n",
"def spawn(self, f, *a):\n \"\"\"\n Schedules a new green thread to be created to run *f(\\*a)* on the next\n available tick::\n\n def echo(pipe, s):\n pipe.send(s)\n\n p = h.pipe()\n h.spawn(echo, p, 'hi')\n p.recv() # returns 'hi'\n \"\"\"\n self.ready.append((f, a))\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.state | python | def state(self, state=vanilla.message.NoState):
return vanilla.message.State(self, state=state) | Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state. | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L243-L249 | null | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.select | python | def select(self, ends, timeout=-1):
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item | An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current) | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L254-L295 | [
"def pause(self, timeout=-1):\n if timeout > -1:\n item = self.scheduled.add(\n timeout,\n getcurrent(),\n vanilla.exception.Timeout('timeout: %s' % timeout))\n\n assert getcurrent() != self.loop, \"cannot pause the main loop\"\n\n resume = None\n try:\n resume = self.loop.switch()\n finally:\n if timeout > -1:\n if isinstance(resume, vanilla.exception.Timeout):\n raise resume\n # since we didn't timeout, remove ourselves from scheduled\n self.scheduled.remove(item)\n\n # TODO: rework State's is set test to be more natural\n if self.stopped.recver.ready:\n raise vanilla.exception.Stop(\n 'Hub stopped while we were paused. There must be a deadlock.')\n\n return resume\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.throw_to | python | def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.throw(*a) | if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb) | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L327-L333 | null | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.spawn_later | python | def spawn_later(self, ms, f, *a):
self.scheduled.add(ms, f, *a) | Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L349-L361 | null | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def sleep(self, ms=1):
"""
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
"""
self.scheduled.add(ms, getcurrent())
self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
def main(self):
"""
Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled
"""
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.sleep | python | def sleep(self, ms=1):
self.scheduled.add(ms, getcurrent())
self.loop.switch() | Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L363-L379 | null | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsibile for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
    """
    Decorator to serialize access to a callable *f*.

    All invocations are funneled through a single worker green thread
    via a router, so *f* never runs concurrently with itself.  An
    exception raised by *f* is sent back over the reply pipe and is
    therefore *returned* to the caller as a value, not raised.
    """
    s = self.router()

    @self.spawn
    def _():
        # worker: executes queued (callable, args, kwargs, reply) tuples
        # one at a time; the loop-local `f` deliberately shadows the
        # decorated callable
        for f, a, kw, r in s.recver:
            try:
                r.send(f(*a, **kw))
            except Exception as e:  # was Py2-only `except Exception, e:`
                r.send(e)

    def _(*a, **kw):
        # caller side: enqueue the call and block for its result
        r = self.pipe()
        s.send((f, a, kw, r))
        return r.recv()
    return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the intial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
    """
    An end is either a `Sender`_ or a `Recver`_. select takes a list of
    *ends* and blocks until *one* of them is ready. The select will block
    either forever, or until the optional *timeout* is reached. *timeout*
    is in milliseconds.

    It returns a tuple of (*end*, *value*) where *end* is the end that has
    become ready. If the *end* is a `Recver`_, then it will have already
    been *recv*'d on which will be available as *value*. For `Sender`_'s
    however the sender is still in a ready state waiting for a *send* and
    *value* is None.

    For example, the following is an appliance that takes an upstream
    `Recver`_ and a downstream `Sender`_. Sending to its upstream will
    alter it's current state. This state can be read at anytime by
    receiving on its downstream::

        def state(h, upstream, downstream):
            current = None
            while True:
                end, value = h.select([upstream, downstream])
                if end == upstream:
                    current = value
                elif end == downstream:
                    end.send(current)
    """
    # fast path: an end is already ready, no need to pause
    for end in ends:
        if end.ready:
            if isinstance(end, vanilla.message.Recver):
                # BUGFIX: the previous `cond and end.recv() or None`
                # idiom collapsed any falsy received value (0, '',
                # False, None) into None; return the value verbatim.
                return end, end.recv()
            return end, None

    # slow path: register interest on every end, pause until one fires
    for end in ends:
        end.select()
    try:
        fired, item = self.pause(timeout=timeout)
    finally:
        # always deregister, even on Timeout/Stop
        for end in ends:
            end.unselect()
    return fired, item
def pause(self, timeout=-1):
    """
    Yield control to the main loop until something switches back into
    this green thread, or *timeout* milliseconds elapse (-1 waits
    forever).

    Returns whatever value the resuming side passed to ``switch``.
    Raises vanilla.exception.Timeout on timeout and
    vanilla.exception.Stop if the Hub was stopped while paused.
    """
    if timeout > -1:
        # schedule a Timeout exception to be thrown at us if nothing
        # resumes this greenlet in time
        item = self.scheduled.add(
            timeout,
            getcurrent(),
            vanilla.exception.Timeout('timeout: %s' % timeout))

    assert getcurrent() != self.loop, "cannot pause the main loop"

    resume = None
    try:
        resume = self.loop.switch()
    finally:
        if timeout > -1:
            # if we were resumed *by* the scheduled Timeout, surface it
            if isinstance(resume, vanilla.exception.Timeout):
                raise resume
            # since we didn't timeout, remove ourselves from scheduled
            self.scheduled.remove(item)

    # TODO: rework State's is set test to be more natural
    if self.stopped.recver.ready:
        raise vanilla.exception.Stop(
            'Hub stopped while we were paused. There must be a deadlock.')
    return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
    """
    Remove *fd* from the poller and close the per-mask sender pipes
    created by register().  A no-op for an fd that was never
    registered.
    """
    if fd in self.registered:
        masks = self.registered.pop(fd)
        try:
            self.poll.unregister(fd, *(masks.keys()))
        # deliberate best-effort: the fd may already be closed/invalid.
        # Narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        except Exception:
            pass
        for sender in masks.values():
            sender.close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
    """
    Switch into *task* with arguments *a*.  *task* is either an
    existing greenlet (resumed directly) or a callable (wrapped in a
    fresh greenlet first).

    Exceptions escaping the task are logged rather than propagated, so
    a failing task cannot kill the main loop.
    """
    try:
        if isinstance(task, greenlet):
            task.switch(*a)
        else:
            greenlet(task).switch(*a)
    except Exception as e:  # was Py2-only `except Exception, e:`
        self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
    """
    Fan poll results out to the interest pipes created by register().

    *events* is an iterable of (fd, mask) pairs as returned by
    self.poll.poll().
    """
    for fd, mask in events:
        # fds may have been unregistered since the poll returned
        if fd in self.registered:
            masks = self.registered[fd]
            if mask == vanilla.poll.POLLERR:
                # error condition on the fd: close every interest pipe
                for sender in masks.values():
                    sender.close()
            else:
                # only forward when the recver is already waiting;
                # otherwise the event is dropped here -- presumably the
                # poller re-reports it (level-triggered) -- TODO confirm
                if masks[mask].ready:
                    masks[mask].send(True)
def main(self):
    """
    The Hub's event loop (runs in its own greenlet, `self.loop`).

    Scheduler steps:
        - run ready until exhaustion
        - if there's something scheduled
            - run overdue scheduled immediately
            - or if there's nothing registered, sleep until next
              scheduled and then go back to ready
        - if there's nothing registered and nothing scheduled, we've
          deadlocked, so stopped
        - poll on registered, with timeout of next scheduled, if
          something is scheduled
    """
    while True:
        # drain the ready queue first; tasks may append more ready work
        while self.ready:
            task, a = self.ready.popleft()
            self.run_task(task, *a)

        if self.scheduled:
            timeout = self.scheduled.timeout()
            # run overdue scheduled immediately
            if timeout < 0:
                task, a = self.scheduled.pop()
                self.run_task(task, *a)
                continue
            # if nothing registered, just sleep until next scheduled
            if not self.registered:
                time.sleep(timeout)
                task, a = self.scheduled.pop()
                self.run_task(task, *a)
                continue
        else:
            # no scheduled work: poll may block indefinitely
            timeout = -1

        # TODO: add better handling for deadlock
        if not self.registered:
            self.stopped.send(True)
            return

        # run poll
        events = None
        try:
            events = self.poll.poll(timeout=timeout)
        # IOError from a signal interrupt
        except IOError:
            pass

        if events:
            # dispatch in a task so event handling can itself pause
            self.spawn(self.dispatch_events, events)
|
cablehead/vanilla | vanilla/core.py | Hub.main | python | def main(self):
while True:
while self.ready:
task, a = self.ready.popleft()
self.run_task(task, *a)
if self.scheduled:
timeout = self.scheduled.timeout()
# run overdue scheduled immediately
if timeout < 0:
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
# if nothing registered, just sleep until next scheduled
if not self.registered:
time.sleep(timeout)
task, a = self.scheduled.pop()
self.run_task(task, *a)
continue
else:
timeout = -1
# TODO: add better handling for deadlock
if not self.registered:
self.stopped.send(True)
return
# run poll
events = None
try:
events = self.poll.poll(timeout=timeout)
# IOError from a signal interrupt
except IOError:
pass
if events:
self.spawn(self.dispatch_events, events) | Scheduler steps:
- run ready until exhaustion
- if there's something scheduled
- run overdue scheduled immediately
- or if there's nothing registered, sleep until next scheduled
and then go back to ready
- if there's nothing registered and nothing scheduled, we've
deadlocked, so stopped
- poll on registered, with timeout of next scheduled, if something
is scheduled | train | https://github.com/cablehead/vanilla/blob/c9f5b86f45720a30e8840fb68b1429b919c4ca66/vanilla/core.py#L443-L495 | [
"def spawn(self, f, *a):\n \"\"\"\n Schedules a new green thread to be created to run *f(\\*a)* on the next\n available tick::\n\n def echo(pipe, s):\n pipe.send(s)\n\n p = h.pipe()\n h.spawn(echo, p, 'hi')\n p.recv() # returns 'hi'\n \"\"\"\n self.ready.append((f, a))\n",
"def run_task(self, task, *a):\n try:\n if isinstance(task, greenlet):\n task.switch(*a)\n else:\n greenlet(task).switch(*a)\n except Exception, e:\n self.log.warn('Exception leaked back to main loop', exc_info=e)\n"
] | class Hub(object):
"""
A Vanilla Hub is a handle to a self contained world of interwoven
coroutines. It includes an event loop which is responsible for scheduling
which green thread should have context. Unlike most asynchronous libraries
this Hub is explicit and must be passed to coroutines that need to interact
with it. This is particularly nice for testing, as it makes it clear what's
going on, and other tests can't inadvertently effect each other.
"""
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__))
self.ready = collections.deque()
self.scheduled = Scheduler()
self.stopped = self.state()
self.registered = {}
self.poll = vanilla.poll.Poll()
self.loop = greenlet(self.main)
def __getattr__(self, name):
# facilitates dynamic plugin look up
try:
package = '.'.join(__name__.split('.')[:-1])
module = importlib.import_module('.'+name, package=package)
plugin = module.__plugin__(self)
setattr(self, name, plugin)
return plugin
except Exception, e:
log.exception(e)
raise AttributeError(
"'Hub' object has no attribute '{name}'\n"
"You may be trying to use a plugin named vanilla.{name}. "
"If you are, you still need to install it".format(
name=name))
def pipe(self):
"""
Returns a `Pipe`_ `Pair`_.
"""
return vanilla.message.Pipe(self)
def producer(self, f):
"""
Convenience to create a `Pipe`_. *f* is a callable that takes the
`Sender`_ end of this Pipe and the corresponding `Recver`_ is
returned::
def counter(sender):
i = 0
while True:
i += 1
sender.send(i)
recver = h.producer(counter)
recver.recv() # returns 1
recver.recv() # returns 2
"""
sender, recver = self.pipe()
self.spawn(f, sender)
return recver
def consumer(self, f):
# TODO: this isn't symmetric with producer. need to rethink
# TODO: don't form a closure
# TODO: test
sender, recver = self.pipe()
@self.spawn
def _():
for item in recver:
f(item)
return sender
def pulse(self, ms, item=True):
"""
Convenience to create a `Pipe`_ that will have *item* sent on it every
*ms* milliseconds. The `Recver`_ end of the Pipe is returned.
Note that since sends to a Pipe block until the Recver is ready, the
pulses will be throttled if the Recver is unable to keep up::
recver = h.pulse(500)
for _ in recver:
log.info('hello') # logs 'hello' every half a second
"""
@self.producer
def _(sender):
while True:
try:
self.sleep(ms)
except vanilla.exception.Halt:
break
sender.send(item)
sender.close()
return _
def trigger(self, f):
def consume(recver, f):
for item in recver:
f()
sender, recver = self.pipe()
self.spawn(consume, recver, f)
sender.trigger = functools.partial(sender.send, True)
return sender
def dealer(self):
"""
Returns a `Dealer`_ `Pair`_.
"""
return vanilla.message.Dealer(self)
def router(self):
"""
Returns a `Router`_ `Pair`_.
"""
return vanilla.message.Router(self)
def queue(self, size):
"""
Returns a `Queue`_ `Pair`_.
"""
return vanilla.message.Queue(self, size)
def channel(self, size=-1):
"""
::
send --\ +---------+ /--> recv
+-> | Channel | -+
send --/ +---------+ \--> recv
A Channel can have many senders and many recvers. By default it is
unbuffered, but you can create buffered Channels by specifying a size.
They're structurally equivalent to channels in Go. It's implementation
is *literally* a `Router`_ piped to a `Dealer`_, with an optional
`Queue`_ in between.
"""
sender, recver = self.router()
if size > 0:
recver = recver.pipe(self.queue(size))
return vanilla.message.Pair(sender, recver.pipe(self.dealer()))
def serialize(self, f):
"""
Decorator to serialize access to a callable *f*
"""
s = self.router()
@self.spawn
def _():
for f, a, kw, r in s.recver:
try:
r.send(f(*a, **kw))
except Exception, e:
r.send(e)
def _(*a, **kw):
r = self.pipe()
s.send((f, a, kw, r))
return r.recv()
return _
def broadcast(self):
return vanilla.message.Broadcast(self)
def state(self, state=vanilla.message.NoState):
"""
Returns a `State`_ `Pair`_.
*state* if supplied sets the initial state.
"""
return vanilla.message.State(self, state=state)
def value(self):
return vanilla.message.Value(self)
def select(self, ends, timeout=-1):
"""
An end is either a `Sender`_ or a `Recver`_. select takes a list of
*ends* and blocks until *one* of them is ready. The select will block
either forever, or until the optional *timeout* is reached. *timeout*
is in milliseconds.
It returns of tuple of (*end*, *value*) where *end* is the end that has
become ready. If the *end* is a `Recver`_, then it will have already
been *recv*'d on which will be available as *value*. For `Sender`_'s
however the sender is still in a ready state waiting for a *send* and
*value* is None.
For example, the following is an appliance that takes an upstream
`Recver`_ and a downstream `Sender`_. Sending to its upstream will
alter it's current state. This state can be read at anytime by
receiving on its downstream::
def state(h, upstream, downstream):
current = None
while True:
end, value = h.select([upstream, downstream])
if end == upstream:
current = value
elif end == downstream:
end.send(current)
"""
for end in ends:
if end.ready:
return end, isinstance(
end, vanilla.message.Recver) and end.recv() or None
for end in ends:
end.select()
try:
fired, item = self.pause(timeout=timeout)
finally:
for end in ends:
end.unselect()
return fired, item
def pause(self, timeout=-1):
if timeout > -1:
item = self.scheduled.add(
timeout,
getcurrent(),
vanilla.exception.Timeout('timeout: %s' % timeout))
assert getcurrent() != self.loop, "cannot pause the main loop"
resume = None
try:
resume = self.loop.switch()
finally:
if timeout > -1:
if isinstance(resume, vanilla.exception.Timeout):
raise resume
# since we didn't timeout, remove ourselves from scheduled
self.scheduled.remove(item)
# TODO: rework State's is set test to be more natural
if self.stopped.recver.ready:
raise vanilla.exception.Stop(
'Hub stopped while we were paused. There must be a deadlock.')
return resume
def switch_to(self, target, *a):
self.ready.append((getcurrent(), ()))
return target.switch(*a)
def throw_to(self, target, *a):
self.ready.append((getcurrent(), ()))
"""
if len(a) == 1 and isinstance(a[0], preserve_exception):
return target.throw(a[0].typ, a[0].val, a[0].tb)
"""
return target.throw(*a)
def spawn(self, f, *a):
"""
Schedules a new green thread to be created to run *f(\*a)* on the next
available tick::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn(echo, p, 'hi')
p.recv() # returns 'hi'
"""
self.ready.append((f, a))
def spawn_later(self, ms, f, *a):
"""
Spawns a callable on a new green thread, scheduled for *ms*
milliseconds in the future::
def echo(pipe, s):
pipe.send(s)
p = h.pipe()
h.spawn_later(50, echo, p, 'hi')
p.recv() # returns 'hi' after 50ms
"""
self.scheduled.add(ms, f, *a)
def sleep(self, ms=1):
    """
    Pauses the current green thread for *ms* milliseconds::

        p = h.pipe()

        @h.spawn
        def _():
            p.send('1')
            h.sleep(50)
            p.send('2')

        p.recv() # returns '1'
        p.recv() # returns '2' after 50 ms
    """
    # re-schedule ourselves *ms* from now, then yield to the main loop;
    # the scheduler switches back into this greenlet when the time is up
    self.scheduled.add(ms, getcurrent())
    self.loop.switch()
def register(self, fd, *masks):
ret = []
self.registered[fd] = {}
for mask in masks:
sender, recver = self.pipe()
self.registered[fd][mask] = sender
ret.append(recver)
self.poll.register(fd, *masks)
if len(ret) == 1:
return ret[0]
return ret
def unregister(self, fd):
if fd in self.registered:
masks = self.registered.pop(fd)
try:
self.poll.unregister(fd, *(masks.keys()))
except:
pass
for mask in masks:
masks[mask].close()
def stop(self):
self.sleep(1)
for fd, masks in self.registered.items():
for mask, sender in masks.items():
sender.stop()
while self.scheduled:
task, a = self.scheduled.pop()
self.throw_to(task, vanilla.exception.Stop('stop'))
try:
self.stopped.recv()
except vanilla.exception.Halt:
return
def stop_on_term(self):
self.signal.subscribe(signal.SIGINT, signal.SIGTERM).recv()
self.stop()
def run_task(self, task, *a):
try:
if isinstance(task, greenlet):
task.switch(*a)
else:
greenlet(task).switch(*a)
except Exception, e:
self.log.warn('Exception leaked back to main loop', exc_info=e)
def dispatch_events(self, events):
for fd, mask in events:
if fd in self.registered:
masks = self.registered[fd]
if mask == vanilla.poll.POLLERR:
for sender in masks.values():
sender.close()
else:
if masks[mask].ready:
masks[mask].send(True)
|
phdata/sdc-api-tool | sdctool/sdc_tool.py | define_system_args | python | def define_system_args(subparsers):
system_parser = subparsers.add_parser("system", help='Available commands: \'info\'')
system_subparsers = system_parser.add_subparsers(help='System commands')
# system info arguments
info_parser = system_subparsers.add_parser('info', help='Get system status information')
info_parser.add_argument('--src', required=True, dest='src', metavar='src',
help='The instance name of the target SDC (must match the name in sdc-hosts.yml)')
info_parser.set_defaults(func=info_command) | Append the parser arguments for the 'system' commands | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/sdc_tool.py#L142-L151 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from . import conf
from . import commands
import sys
import json
logging.basicConfig(level=logging.DEBUG)
config = conf.Conf()
def promote_command(args):
    """CLI handler: delegate to commands.promote_pipeline with the module-level config."""
    return commands.promote_pipeline(config, args)
def export_command(args):
    """CLI handler: delegate to commands.export_pipeline with the module-level config."""
    return commands.export_pipeline(config, args)
def import_command(args):
    """CLI handler: delegate to commands.import_pipeline with the module-level config."""
    return commands.import_pipeline(config, args)
def info_command(args):
    """CLI handler: delegate to commands.system_info with the module-level config."""
    return commands.system_info(config, args)
def start_command(args):
    """CLI handler: delegate to commands.start_pipeline with the module-level config."""
    return commands.start_pipeline(config, args)
def stop_command(args):
    """CLI handler: delegate to commands.stop_pipeline with the module-level config."""
    return commands.stop_pipeline(config, args)
def validate_command(args):
    """CLI handler: delegate to commands.validate_pipeline with the module-level config."""
    return commands.validate_pipeline(config, args)
def status_command(args):
    """CLI handler: delegate to commands.status_pipeline with the module-level config."""
    return commands.status_pipeline(config, args)
def define_pipeline_args(subparsers):
    """
    Register the `pipeline` sub-command tree on *subparsers*.

    Adds the 'promote', 'export', 'import', 'start', 'stop', 'validate'
    and 'status' commands, each wired via set_defaults(func=...) to the
    matching *_command handler in this module.
    """
    pipeline_parser = \
        subparsers.add_parser("pipeline",
                              help='''Available commands: 'import', 'export', 'start', 'stop', 'promote', 'validate', 'status\'''')

    pipeline_subparsers = pipeline_parser.add_subparsers(help="Pipeline commands")

    # pipeline promote arguments
    promote_parser = pipeline_subparsers.add_parser('promote', help='Promote a pipeline from one SDC to another.')
    promote_parser.add_argument('--src', required=True,
                                dest='src_instance',
                                metavar='source_instance_name',
                                help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')
    promote_parser.add_argument('--dest',
                                required=True,
                                dest='dest_instance',
                                metavar='dest_instance_name',
                                help='The instance name of the destination SDC (must match the name in sdc-hosts.yml)')
    promote_parser.add_argument('--srcPipelineId',
                                required=True,
                                dest='src_pipeline_id',
                                metavar='source-pipeline-id',
                                help='The ID of a pipeline in the source SDC')
    promote_parser.add_argument('--destPipelineId',
                                required=False, dest='dest_pipeline_id',
                                metavar='destination-pipeline-id',
                                help='The ID of a pipeline in the destination SDC')
    promote_parser.add_argument('--start',
                                action='store_true', dest='start_dest',
                                help='Start the destination pipeline if the import is successful.')
    promote_parser.set_defaults(func=promote_command)

    # pipeline export arguments
    export_parser = pipeline_subparsers.add_parser('export', help='Export a pipeline to a file.')
    export_parser.add_argument('--src',
                               required=True,
                               dest='src_instance', metavar='source',
                               help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')
    export_parser.add_argument('--pipelineId',
                               required=True,
                               dest='src_pipeline_id',
                               metavar='sourcePipelineId', help='The ID of a pipeline in the source SDC')
    export_parser.add_argument('--out', required=True, dest='out', help='Output file path')
    export_parser.set_defaults(func=export_command)

    # pipeline import arguments
    import_parser = pipeline_subparsers.add_parser('import', help='Import a pipeline from a JSON file.')
    import_parser.add_argument('--dest', required=True, dest='dst_instance', metavar='dest_instance',
                               help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')
    import_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                               metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')
    import_parser.add_argument('--pipelineJson', required=True, dest='pipeline_json', help='Pipeline json file path')
    import_parser.add_argument('--overwrite', required=False, action='store_true', dest='overwrite', help='Overwrite existing pipeline')
    import_parser.set_defaults(func=import_command)

    # pipeline start arguments
    start_parser = pipeline_subparsers.add_parser('start', help='Start a pipeline.')
    start_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                              help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')
    start_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                              metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')
    start_parser.add_argument('--runtimeParameters', required=False, dest='runtime_parameters',
                              metavar='{"HOST": "host1"}', help='JSON blob of runtime parameters')
    start_parser.set_defaults(func=start_command)

    # pipeline stop arguments
    stop_parser = pipeline_subparsers.add_parser('stop', help='Stop a pipeline.')
    stop_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                             help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    stop_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                             metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')
    stop_parser.set_defaults(func=stop_command)

    # pipeline validate arguments
    validate_parser = pipeline_subparsers.add_parser('validate', help='Validate a pipeline and show issues.')
    validate_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                                 help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    validate_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                                 metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')
    validate_parser.set_defaults(func=validate_command)

    # pipeline status arguments
    # FIX: previously this parser was also stored in `validate_parser`,
    # shadowing the validate parser above; renamed for clarity.
    status_parser = pipeline_subparsers.add_parser('status', help='Retrieve current status of a pipeline.')
    status_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                               help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    status_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                               metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')
    status_parser.set_defaults(func=status_command)
def run_with_args(args):
    """
    Main script entry point.

    Builds the argparse CLI (the 'pipeline' and 'system' command
    trees), parses *args* (an argv-style list of strings) and invokes
    the handler that the matched sub-command registered via
    set_defaults(func=...).  Returns that handler's result.
    """
    parser = argparse.ArgumentParser(description='StreamSets Data Collector tools.')
    subparsers = parser.add_subparsers(help='sdc-tool')

    define_pipeline_args(subparsers)
    define_system_args(subparsers)

    args = parser.parse_args(args=args)
    # NOTE(review): if no sub-command is supplied, `args.func` is unset
    # and this raises AttributeError -- presumably acceptable; confirm.
    result = args.func(args)
    return result
def main():
    """Console entry point: run the CLI against sys.argv and pretty-print the result as JSON."""
    print(json.dumps(run_with_args(args = sys.argv[1:]), indent=4, sort_keys=False))
if __name__ == '__main__':
main()
|
phdata/sdc-api-tool | sdctool/sdc_tool.py | run_with_args | python | def run_with_args(args):
parser = argparse.ArgumentParser(description='StreamSets Data Collector tools.')
subparsers = parser.add_subparsers(help='sdc-tool')
define_pipeline_args(subparsers)
define_system_args(subparsers)
args = parser.parse_args(args=args)
result = args.func(args)
return result | Main script entry point. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/sdc_tool.py#L154-L164 | [
"def define_pipeline_args(subparsers):\n\n pipeline_parser = \\\n subparsers.add_parser(\"pipeline\",\n help='''Available commands: 'import', 'export', 'start', 'stop', 'promote', 'validate', 'status\\'''')\n\n pipeline_subparsers = pipeline_parser.add_subparsers(help=\"Pipeline commands\")\n\n # pipeline promote arguments\n promote_parser = pipeline_subparsers.add_parser('promote', help='Promote a pipeline from one SDC to another.')\n promote_parser.add_argument('--src', required=True,\n dest='src_instance',\n metavar='source_instance_name',\n help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')\n promote_parser.add_argument('--dest',\n required=True,\n dest='dest_instance',\n metavar='dest_instance_name',\n help='The instance name of the destination SDC (must match the name in sdc-hosts.yml)')\n promote_parser.add_argument('--srcPipelineId',\n required=True,\n dest='src_pipeline_id',\n metavar='source-pipeline-id',\n help='The ID of a pipeline in the source SDC')\n promote_parser.add_argument('--destPipelineId',\n required=False, dest='dest_pipeline_id',\n metavar='destination-pipeline-id',\n help='The ID of a pipeline in the destination SDC')\n promote_parser.add_argument('--start',\n action='store_true', dest='start_dest',\n help='Start the destination pipeline if the import is successful.')\n\n promote_parser.set_defaults(func=promote_command)\n\n # pipeline export arguments\n export_parser = pipeline_subparsers.add_parser('export', help='Export a pipeline to a file.')\n export_parser.add_argument('--src',\n required=True,\n dest='src_instance', metavar='source',\n help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')\n export_parser.add_argument('--pipelineId',\n required=True,\n dest='src_pipeline_id',\n metavar='sourcePipelineId', help='The ID of a pipeline in the source SDC')\n export_parser.add_argument('--out', required=True, dest='out', help='Output file path')\n 
export_parser.set_defaults(func=export_command)\n\n # pipeline import arguments\n import_parser = pipeline_subparsers.add_parser('import', help='Import a pipeline from a JSON file.')\n import_parser.add_argument('--dest', required=True, dest='dst_instance', metavar='dest_instance',\n help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')\n import_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',\n metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')\n import_parser.add_argument('--pipelineJson', required=True, dest='pipeline_json', help='Pipeline json file path')\n import_parser.add_argument('--overwrite', required=False, action='store_true', dest='overwrite', help='Overwrite existing pipeline')\n import_parser.set_defaults(func=import_command)\n\n # pipeline start arguments\n start_parser = pipeline_subparsers.add_parser('start', help='Start a pipeline.')\n start_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',\n help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')\n start_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',\n metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')\n start_parser.add_argument('--runtimeParameters', required=False, dest='runtime_parameters',\n metavar='{\"HOST\": \"host1\"}', help='JSON blob of runtime parameters')\n start_parser.set_defaults(func=start_command)\n\n # pipeline stop arguments\n stop_parser = pipeline_subparsers.add_parser('stop', help='Stop a pipeline.')\n stop_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',\n help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')\n stop_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',\n metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')\n 
stop_parser.set_defaults(func=stop_command)\n\n # pipeline validate arguments\n validate_parser = pipeline_subparsers.add_parser('validate', help='Validate a pipeline and show issues.')\n validate_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',\n help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')\n validate_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',\n metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')\n validate_parser.set_defaults(func=validate_command)\n\n # pipeline status arguments\n validate_parser = pipeline_subparsers.add_parser('status', help='Retrieve current status of a pipeline.')\n validate_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',\n help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')\n validate_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',\n metavar='destinationPipelineId', help='The ID of a pipeline in the source SDC')\n validate_parser.set_defaults(func=status_command)\n",
"def define_system_args(subparsers):\n \"\"\"Append the parser arguments for the 'system' commands\"\"\"\n system_parser = subparsers.add_parser(\"system\", help='Available commands: \\'info\\'')\n system_subparsers = system_parser.add_subparsers(help='System commands')\n\n # system info arguments\n info_parser = system_subparsers.add_parser('info', help='Get system status information')\n info_parser.add_argument('--src', required=True, dest='src', metavar='src',\n help='The instance name of the target SDC (must match the name in sdc-hosts.yml)')\n info_parser.set_defaults(func=info_command)\n"
] | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from . import conf
from . import commands
import sys
import json
logging.basicConfig(level=logging.DEBUG)
# Shared configuration (SDC host definitions) loaded once at import time;
# every command handler below passes it through to the `commands` module.
config = conf.Conf()
def promote_command(args):
    """CLI handler: promote a pipeline from one SDC to another."""
    return commands.promote_pipeline(config, args)
def export_command(args):
    """CLI handler: export a pipeline to a JSON file."""
    return commands.export_pipeline(config, args)
def import_command(args):
    """CLI handler: import a pipeline from a JSON file."""
    return commands.import_pipeline(config, args)
def info_command(args):
    """CLI handler: fetch system information for an SDC instance."""
    return commands.system_info(config, args)
def start_command(args):
    """CLI handler: start a pipeline."""
    return commands.start_pipeline(config, args)
def stop_command(args):
    """CLI handler: stop a pipeline."""
    return commands.stop_pipeline(config, args)
def validate_command(args):
    """CLI handler: validate a pipeline and report issues."""
    return commands.validate_pipeline(config, args)
def status_command(args):
    """CLI handler: report the current status of a pipeline."""
    return commands.status_pipeline(config, args)
def define_pipeline_args(subparsers):
    """Append the parser arguments for the 'pipeline' commands.

    Registers one sub-parser per pipeline command ('promote', 'export',
    'import', 'start', 'stop', 'validate', 'status') and binds each to its
    handler via ``set_defaults(func=...)``.
    """
    pipeline_parser = \
        subparsers.add_parser("pipeline",
                              help='''Available commands: 'import', 'export', 'start', 'stop', 'promote', 'validate', 'status\'''')
    pipeline_subparsers = pipeline_parser.add_subparsers(help="Pipeline commands")
    # pipeline promote arguments
    promote_parser = pipeline_subparsers.add_parser('promote', help='Promote a pipeline from one SDC to another.')
    promote_parser.add_argument('--src', required=True,
                                dest='src_instance',
                                metavar='source_instance_name',
                                help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')
    promote_parser.add_argument('--dest',
                                required=True,
                                dest='dest_instance',
                                metavar='dest_instance_name',
                                help='The instance name of the destination SDC (must match the name in sdc-hosts.yml)')
    promote_parser.add_argument('--srcPipelineId',
                                required=True,
                                dest='src_pipeline_id',
                                metavar='source-pipeline-id',
                                help='The ID of a pipeline in the source SDC')
    promote_parser.add_argument('--destPipelineId',
                                required=False, dest='dest_pipeline_id',
                                metavar='destination-pipeline-id',
                                help='The ID of a pipeline in the destination SDC')
    promote_parser.add_argument('--start',
                                action='store_true', dest='start_dest',
                                help='Start the destination pipeline if the import is successful.')
    promote_parser.set_defaults(func=promote_command)
    # pipeline export arguments
    export_parser = pipeline_subparsers.add_parser('export', help='Export a pipeline to a file.')
    export_parser.add_argument('--src',
                               required=True,
                               dest='src_instance', metavar='source',
                               help='The instance name of the source SDC (must match the name in sdc-hosts.yml)')
    export_parser.add_argument('--pipelineId',
                               required=True,
                               dest='src_pipeline_id',
                               metavar='sourcePipelineId', help='The ID of a pipeline in the source SDC')
    export_parser.add_argument('--out', required=True, dest='out', help='Output file path')
    export_parser.set_defaults(func=export_command)
    # pipeline import arguments
    import_parser = pipeline_subparsers.add_parser('import', help='Import a pipeline from a JSON file.')
    import_parser.add_argument('--dest', required=True, dest='dst_instance', metavar='dest_instance',
                               help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')
    # Help text fixed: this ID addresses the destination SDC, not the source.
    import_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                               metavar='destinationPipelineId', help='The ID of the pipeline on the destination SDC')
    import_parser.add_argument('--pipelineJson', required=True, dest='pipeline_json', help='Pipeline json file path')
    import_parser.add_argument('--overwrite', required=False, action='store_true', dest='overwrite', help='Overwrite existing pipeline')
    import_parser.set_defaults(func=import_command)
    # pipeline start arguments
    start_parser = pipeline_subparsers.add_parser('start', help='Start a pipeline.')
    start_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                              help='The name of the destination SDC (must match an instance name in sdc-hosts.yml)')
    start_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                              metavar='destinationPipelineId', help='The ID of the pipeline on the target SDC')
    start_parser.add_argument('--runtimeParameters', required=False, dest='runtime_parameters',
                              metavar='{"HOST": "host1"}', help='JSON blob of runtime parameters')
    start_parser.set_defaults(func=start_command)
    # pipeline stop arguments
    stop_parser = pipeline_subparsers.add_parser('stop', help='Stop a pipeline.')
    stop_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                             help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    stop_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                             metavar='destinationPipelineId', help='The ID of the pipeline on the target SDC')
    stop_parser.set_defaults(func=stop_command)
    # pipeline validate arguments
    validate_parser = pipeline_subparsers.add_parser('validate', help='Validate a pipeline and show issues.')
    validate_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                                 help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    validate_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                                 metavar='destinationPipelineId', help='The ID of the pipeline on the target SDC')
    validate_parser.set_defaults(func=validate_command)
    # pipeline status arguments
    # Renamed from `validate_parser` (original shadowed the validate parser above).
    status_parser = pipeline_subparsers.add_parser('status', help='Retrieve current status of a pipeline.')
    status_parser.add_argument('--host', required=True, dest='host_instance', metavar='host_instance',
                               help='The instance name of the target SDC (must match an instance name in sdc-hosts.yml)')
    status_parser.add_argument('--pipelineId', required=True, dest='pipeline_id',
                               metavar='destinationPipelineId', help='The ID of the pipeline on the target SDC')
    status_parser.set_defaults(func=status_command)
def define_system_args(subparsers):
    """Register the parser arguments for the 'system' command group."""
    sys_parser = subparsers.add_parser("system", help='Available commands: \'info\'')
    sys_subparsers = sys_parser.add_subparsers(help='System commands')

    # 'system info': report status information for a single SDC instance.
    info = sys_subparsers.add_parser('info', help='Get system status information')
    info.add_argument('--src', required=True, dest='src', metavar='src',
                      help='The instance name of the target SDC (must match the name in sdc-hosts.yml)')
    info.set_defaults(func=info_command)
def main():
    """CLI entry point: run the requested command and pretty-print its JSON result."""
    # NOTE(review): `run_with_args` is not defined in this module or in any
    # visible import -- confirm it is provided elsewhere, otherwise this
    # raises NameError at runtime.
    print(json.dumps(run_with_args(args = sys.argv[1:]), indent=4, sort_keys=False))
if __name__ == '__main__':
    main()
|
phdata/sdc-api-tool | sdctool/api.py | start_pipeline | python | def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}):
start_result = requests.post(url + '/' + pipeline_id + '/start',
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
start_result.raise_for_status()
logging.info('Pipeline start requested.')
poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
logging.info("Pipeline started.")
return start_result.json() | Start a running pipeline. The API waits for the pipeline to be fully started.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
runtime_parameters (dict): the desired runtime parameters for the pipeline.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L29-L50 | [
"def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):\n status = \"\"\n current_iterations = POLL_ITERATIONS\n\n while status != target and current_iterations > 0:\n print(status)\n time.sleep(POLLING_SECONDS)\n status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']\n\n if current_iterations == 0:\n raise 'pipeline status timed out after {} seconds. Current status \\'{}\\''.format(str(POLL_ITERATIONS / POLLING_SECONDS), status)\n"
] | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline's status endpoint until it reports *target*.

    Args:
        target (str): the status to wait for (e.g. STATUS_RUNNING or STATUS_STOPPED).
        url (str): the pipeline REST API url.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Raises:
        TimeoutError: if the pipeline does not reach *target* within
            POLL_ITERATIONS * POLLING_SECONDS seconds.
    """
    status = ""
    current_iterations = POLL_ITERATIONS
    while status != target:
        if current_iterations <= 0:
            # Fixes the original: the counter was never decremented (infinite
            # loop), a plain string was raised (TypeError in Python 3), and the
            # timeout seconds were computed with / instead of *.
            raise TimeoutError(
                'pipeline status timed out after {} seconds. Current status \'{}\''.format(
                    str(POLL_ITERATIONS * POLLING_SECONDS), status))
        current_iterations -= 1
        logging.debug('Polling pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
def export_pipeline(url, pipeline_id, auth, verify_ssl):
    """Fetch the exported config and rules of a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to export.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = url + '/' + pipeline_id + '/export'
    response = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    if response.status_code == 404:
        logging.error('Pipeline not found: ' + pipeline_id)
    response.raise_for_status()
    return response.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
    """Fetch the current status of a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to query.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = url + '/' + pipeline_id + '/status'
    response = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.debug('Status request: ' + url + '/status')
    logging.debug(response.json())
    return response.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Fetch the current status of a preview/validation run.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline being previewed.
        previewer_id (str): the previewer id created by starting a preview or validation
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    # Local renamed from `preview_status` so it no longer shadows this function.
    endpoint = url + '/' + pipeline_id + '/preview/' + previewer_id + "/status"
    response = requests.get(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.debug(response.json())
    return response.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Block until a preview/validation run leaves the VALIDATING state.

    Args:
        url (str): the pipeline REST API url.
        pipeline_id (str): the ID of the pipeline being validated.
        previewer_id (str): the previewer id returned by the validate call.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Raises:
        TimeoutError: if validation is still VALIDATING after
            POLL_ITERATIONS * POLLING_SECONDS seconds (the original loop
            could spin forever if the server never progressed).
    """
    remaining = POLL_ITERATIONS
    status = VALIDATING
    while status == VALIDATING:
        if remaining <= 0:
            raise TimeoutError('validation timed out after {} seconds'.format(POLL_ITERATIONS * POLLING_SECONDS))
        remaining -= 1
        time.sleep(POLLING_SECONDS)
        status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
        logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
    """Request a pipeline stop and block until the pipeline reports 'STOPPED'.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to stop.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    endpoint = url + '/' + pipeline_id + '/stop'
    response = requests.post(endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.info("Pipeline stop requested.")
    poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
    logging.info('Pipeline stopped.')
    return response.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result with any issues.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse previewer_id instead of re-parsing the validate response body.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    # Fail loudly on an HTTP error instead of json-decoding an error page.
    preview_result.raise_for_status()
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    '''
    Return True when the pipeline's status endpoint answers without an HTTP error.

    :param url: (str): the host url in the form 'http://host:port/'.
    :param pipeline_id: (string) the pipeline identifier
    :param auth: (tuple) a tuple of username, password
    :return: (boolean)
    '''
    try:
        pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
    except requests.HTTPError:
        return False
    return True
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
    """Import a pipeline definition, completely replacing any existing one.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID under which the pipeline is imported.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates
        overwrite (bool): overwrite existing pipeline

    Returns:
        dict: the response json
    """
    endpoint = url + '/' + pipeline_id + '/import'
    response = requests.post(endpoint, params={'overwrite': overwrite},
                             headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
    if response.status_code != 200:
        logging.error('Import error response: ' + response.text)
    response.raise_for_status()
    logging.info('Pipeline import successful.')
    return response.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
    """Create a brand-new pipeline from an exported payload.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json paylod as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    pipeline_config = json_payload['pipelineConfig']
    title = pipeline_config['title']
    description = pipeline_config['description']
    logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
    response = requests.put(url + '/' + title,
                            params={'description': description, 'autoGeneratePipelineId': True},
                            headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    body = response.json()
    logging.debug(body)
    logging.info('Pipeline creation successful.')
    return body
def system_info(url, auth, verify_ssl):
    """Retrieve SDC system information.

    Args:
        url (str): the host url.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    return response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | export_pipeline | python | def export_pipeline(url, pipeline_id, auth, verify_ssl):
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json() | Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L66-L83 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it is fully RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates
        runtime_parameters (dict): the desired runtime parameters for the pipeline.

    Returns:
        dict: the response json
    """
    # Avoid the mutable default argument; an empty dict preserves the original
    # request payload for callers that omit the parameter.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline's status endpoint until it reports *target*.

    Args:
        target (str): the status to wait for (e.g. STATUS_RUNNING or STATUS_STOPPED).
        url (str): the pipeline REST API url.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Raises:
        TimeoutError: if the pipeline does not reach *target* within
            POLL_ITERATIONS * POLLING_SECONDS seconds.
    """
    status = ""
    current_iterations = POLL_ITERATIONS
    while status != target:
        if current_iterations <= 0:
            # Fixes the original: the counter was never decremented (infinite
            # loop), a plain string was raised (TypeError in Python 3), and the
            # timeout seconds were computed with / instead of *.
            raise TimeoutError(
                'pipeline status timed out after {} seconds. Current status \'{}\''.format(
                    str(POLL_ITERATIONS * POLLING_SECONDS), status))
        current_iterations -= 1
        logging.debug('Polling pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
"""Validate a pipeline and show issues.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
validate_result.raise_for_status()
previewer_id = validate_result.json()['previewerId']
poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + validate_result.json()['previewerId'], headers=X_REQ_BY, auth=auth, verify=verify_ssl)
logging.debug('result content: {}'.format(preview_result.content))
return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
'''
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
'''
try:
pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
return True
except requests.HTTPError:
return False
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | pipeline_status | python | def pipeline_status(url, pipeline_id, auth, verify_ssl):
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json() | Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L86-L103 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it is fully RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates
        runtime_parameters (dict): the desired runtime parameters for the pipeline.

    Returns:
        dict: the response json
    """
    # Avoid the mutable default argument; an empty dict preserves the original
    # request payload for callers that omit the parameter.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline's status endpoint until it reports *target*.

    Args:
        target (str): the status to wait for (e.g. STATUS_RUNNING or STATUS_STOPPED).
        url (str): the pipeline REST API url.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates

    Raises:
        TimeoutError: if the pipeline does not reach *target* within
            POLL_ITERATIONS * POLLING_SECONDS seconds.
    """
    status = ""
    current_iterations = POLL_ITERATIONS
    while status != target:
        if current_iterations <= 0:
            # Fixes the original: the counter was never decremented (infinite
            # loop), a plain string was raised (TypeError in Python 3), and the
            # timeout seconds were computed with / instead of *.
            raise TimeoutError(
                'pipeline status timed out after {} seconds. Current status \'{}\''.format(
                    str(POLL_ITERATIONS * POLLING_SECONDS), status))
        current_iterations -= 1
        logging.debug('Polling pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
"""Validate a pipeline and show issues.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
validate_result.raise_for_status()
previewer_id = validate_result.json()['previewerId']
poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + validate_result.json()['previewerId'], headers=X_REQ_BY, auth=auth, verify=verify_ssl)
logging.debug('result content: {}'.format(preview_result.content))
return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
'''
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
'''
try:
pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
return True
except requests.HTTPError:
return False
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | preview_status | python | def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json() | Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L105-L122 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait for it to be fully running.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to start.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json

    """
    # Avoid a shared mutable default argument; an empty dict (not None) must
    # still be sent as the JSON body to match the original request shape.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl,
                                 json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    # Block until the pipeline reports RUNNING (or the poller times out).
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline until its status equals *target* or the budget runs out.

    Polls every POLLING_SECONDS seconds for at most POLL_ITERATIONS attempts.

    Args:
        target (str): the status to wait for (e.g. STATUS_RUNNING).
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline being polled.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Raises:
        RuntimeError: if the pipeline does not reach *target* in time.

    """
    status = ""
    remaining = POLL_ITERATIONS
    while status != target and remaining > 0:
        logging.debug('Polled pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
        # The original never decremented the counter, so the timeout branch
        # was unreachable and the loop could spin forever.
        remaining -= 1

    if status != target:
        # Raising a plain string is a TypeError in Python; raise a real
        # exception instead. Total wait is iterations * seconds (the original
        # message divided them).
        raise RuntimeError(
            'pipeline status timed out after {} seconds. Current status \'{}\''.format(
                POLL_ITERATIONS * POLLING_SECONDS, status))
def export_pipeline(url, pipeline_id, auth, verify_ssl):
    """Export the config and rules for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    export_endpoint = url + '/' + pipeline_id + '/export'
    response = requests.get(export_endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    # Give a targeted message on 404 before raising for any error status.
    if response.status_code == 404:
        logging.error('Pipeline not found: ' + pipeline_id)
    response.raise_for_status()
    return response.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
    """Retrieve the current status for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to query.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    status_url = url + '/' + pipeline_id + '/status'
    status_result = requests.get(status_url, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    status_result.raise_for_status()
    # Log the URL actually requested; the original debug line omitted the
    # pipeline id and logged a URL that was never fetched.
    logging.debug('Status request: ' + status_url)
    logging.debug(status_result.json())
    return status_result.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Block until a validation preview leaves the VALIDATING state.

    Polls the preview status every POLLING_SECONDS seconds and logs each
    observed status at debug level.
    """
    while True:
        time.sleep(POLLING_SECONDS)
        current = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
        logging.debug('poll_validation status: {}'.format(current))
        if current != VALIDATING:
            break
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
    """Stop a running pipeline and wait for it to reach 'STOPPED'.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to stop.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    stop_endpoint = '{}/{}/stop'.format(url, pipeline_id)
    response = requests.post(stop_endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.info("Pipeline stop requested.")
    # Wait for the pipeline to report STOPPED before returning.
    poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
    logging.info('Pipeline stopped.')
    return response.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result describing any issues.

    Kicks off validation, waits for it to leave the VALIDATING state, then
    fetches the preview result.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate',
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse the previewer id extracted above; the original re-parsed the
    # validation response json a second time for the same value.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    """Return True if the pipeline can be found, False otherwise.

    :param url: (str): the host url in the form 'http://host:port/'.
    :param pipeline_id: (string) the pipeline identifier
    :param auth: (tuple) a tuple of username, password
    :return: (boolean)
    """
    try:
        # A successful status lookup proves the pipeline exists; an HTTP
        # error (e.g. 404) means it does not.
        status_json = pipeline_status(url, pipeline_id, auth, verify_ssl)
        status_json['status']
    except requests.HTTPError:
        return False
    return True
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
    """Import a pipeline, completely replacing any existing definition.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to import into.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates.
        overwrite (bool): overwrite an existing pipeline.

    Returns:
        dict: the response json

    """
    response = requests.post(
        url + '/' + pipeline_id + '/import',
        params={'overwrite': overwrite},
        headers=X_REQ_BY,
        auth=auth,
        verify=verify_ssl,
        json=json_payload,
    )
    # Surface the server's explanation before raising on a failed import.
    if response.status_code != 200:
        logging.error('Import error response: ' + response.text)
    response.raise_for_status()
    logging.info('Pipeline import successful.')
    return response.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
    """Create a new pipeline from an exported payload.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    pipeline_config = json_payload['pipelineConfig']
    title = pipeline_config['title']
    create_params = {
        'description': pipeline_config['description'],
        'autoGeneratePipelineId': True,
    }
    logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
    put_result = requests.put(url + '/' + title, params=create_params,
                              headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    put_result.raise_for_status()
    create_json = put_result.json()
    logging.debug(create_json)
    logging.info('Pipeline creation successful.')
    return create_json
def system_info(url, auth, verify_ssl):
    """Retrieve SDC system information.

    Args:
        url (str): the host url.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    return response.json()
def build_pipeline_url(host_url):
    """Return the pipeline REST API url for *host_url*."""
    return '{}/pipeline'.format(_base_url(host_url))
def build_system_url(host_url):
    """Return the system REST API url for *host_url*."""
    return '{}/system'.format(_base_url(host_url))
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | stop_pipeline | python | def stop_pipeline(url, pipeline_id, auth, verify_ssl):
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json() | Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L131-L153 | [
"def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):\n status = \"\"\n current_iterations = POLL_ITERATIONS\n\n while status != target and current_iterations > 0:\n print(status)\n time.sleep(POLLING_SECONDS)\n status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']\n\n if current_iterations == 0:\n raise 'pipeline status timed out after {} seconds. Current status \\'{}\\''.format(str(POLL_ITERATIONS / POLLING_SECONDS), status)\n"
] | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}):
"""Start a running pipeline. The API waits for the pipeline to be fully started.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
runtime_parameters (dict): the desired runtime parameters for the pipeline.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
start_result = requests.post(url + '/' + pipeline_id + '/start',
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
start_result.raise_for_status()
logging.info('Pipeline start requested.')
poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
logging.info("Pipeline started.")
return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
status = ""
current_iterations = POLL_ITERATIONS
while status != target and current_iterations > 0:
print(status)
time.sleep(POLLING_SECONDS)
status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
if current_iterations == 0:
raise 'pipeline status timed out after {} seconds. Current status \'{}\''.format(str(POLL_ITERATIONS / POLLING_SECONDS), status)
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Retrieve the current status for a preview.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline being previewed.
        previewer_id (str): the previewer id created by starting a preview or validation.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json

    """
    # Renamed the local from `preview_status`, which shadowed this function's
    # own name and would break any later in-function recursion or reference.
    status_response = requests.get(
        url + '/' + pipeline_id + '/preview/' + previewer_id + "/status",
        headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    status_response.raise_for_status()
    logging.debug(status_response.json())
    return status_response.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
"""Validate a pipeline and show issues.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
validate_result.raise_for_status()
previewer_id = validate_result.json()['previewerId']
poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + validate_result.json()['previewerId'], headers=X_REQ_BY, auth=auth, verify=verify_ssl)
logging.debug('result content: {}'.format(preview_result.content))
return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
'''
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
'''
try:
pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
return True
except requests.HTTPError:
return False
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | validate_pipeline | python | def validate_pipeline(url, pipeline_id, auth, verify_ssl):
validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
validate_result.raise_for_status()
previewer_id = validate_result.json()['previewerId']
poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + validate_result.json()['previewerId'], headers=X_REQ_BY, auth=auth, verify=verify_ssl)
logging.debug('result content: {}'.format(preview_result.content))
return preview_result.json() | Validate a pipeline and show issues.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L156-L178 | [
"def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):\n status = VALIDATING\n while status == VALIDATING:\n time.sleep(POLLING_SECONDS)\n status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']\n logging.debug('poll_validation status: {}'.format(status))\n"
] | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}):
"""Start a running pipeline. The API waits for the pipeline to be fully started.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
runtime_parameters (dict): the desired runtime parameters for the pipeline.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
start_result = requests.post(url + '/' + pipeline_id + '/start',
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)
start_result.raise_for_status()
logging.info('Pipeline start requested.')
poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
logging.info("Pipeline started.")
return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
status = ""
current_iterations = POLL_ITERATIONS
while status != target and current_iterations > 0:
print(status)
time.sleep(POLLING_SECONDS)
status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
if current_iterations == 0:
raise 'pipeline status timed out after {} seconds. Current status \'{}\''.format(str(POLL_ITERATIONS / POLLING_SECONDS), status)
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
'''
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
'''
try:
pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
return True
except requests.HTTPError:
return False
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | pipeline_exists | python | def pipeline_exists(url, pipeline_id, auth, verify_ssl):
'''
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
'''
try:
pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
return True
except requests.HTTPError:
return False | :param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean) | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L180-L191 | [
"def pipeline_status(url, pipeline_id, auth, verify_ssl):\n \"\"\"Retrieve the current status for a pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n status_result.raise_for_status()\n logging.debug('Status request: ' + url + '/status')\n logging.debug(status_result.json())\n return status_result.json()\n"
] | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it reports RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to start.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json
    """
    # Create a fresh dict per call instead of using a mutable default
    # argument, which would be shared across all calls.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl,
                                 json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline until it reaches the *target* status or the budget runs out.

    Args:
        target (str): the status to wait for, e.g. STATUS_RUNNING or STATUS_STOPPED.
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Raises:
        Exception: if the pipeline does not reach *target* within the poll budget.
    """
    status = ""
    remaining = POLL_ITERATIONS
    while status != target and remaining > 0:
        # Use logging instead of a bare print for diagnostics.
        logging.debug('Pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        # Decrement the budget so a stuck pipeline cannot loop forever
        # (the original counter was never decremented).
        remaining -= 1
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
    if status != target:
        # Raise a real exception object; raising a bare string is a TypeError
        # in Python 3. Total budget is iterations * seconds-per-poll.
        raise Exception('pipeline status timed out after {} seconds. Current status \'{}\''
                        .format(str(POLL_ITERATIONS * POLLING_SECONDS), status))
def export_pipeline(url, pipeline_id, auth, verify_ssl):
    """Export the config and rules for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the exported pipeline.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    export_endpoint = url + '/' + pipeline_id + '/export'
    response = requests.get(export_endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    # Log a targeted message for the common "unknown pipeline" case before
    # raising the generic HTTP error.
    if response.status_code == 404:
        logging.error('Pipeline not found: ' + pipeline_id)
    response.raise_for_status()
    return response.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
    """Retrieve the current status for a pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to query.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    status_endpoint = url + '/' + pipeline_id + '/status'
    response = requests.get(status_endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.debug('Status request: ' + url + '/status')
    payload = response.json()
    logging.debug(payload)
    return payload
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Retrieve the current status for a preview.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline being previewed.
        previewer_id (str): the previewer id created by starting a preview or validation.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    status_url = url + '/' + pipeline_id + '/preview/' + previewer_id + '/status'
    # Local name chosen so it does not shadow this function's own name.
    response = requests.get(status_url, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    body = response.json()
    logging.debug(body)
    return body
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
    """Block until a preview/validation run leaves the VALIDATING state.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline being validated.
        previewer_id (str): the previewer id returned by the validate call.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
    """
    current_status = VALIDATING
    while current_status == VALIDATING:
        time.sleep(POLLING_SECONDS)
        current_status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
        logging.debug('poll_validation status: {}'.format(current_status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
    """Stop a running pipeline and wait for it to report STOPPED.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to stop.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    stop_endpoint = url + '/' + pipeline_id + '/stop'
    response = requests.post(stop_endpoint, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    logging.info("Pipeline stop requested.")
    # Block until the pipeline actually reaches the stopped state.
    poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
    logging.info('Pipeline stopped.')
    return response.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result (including any issues).

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate',
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse the previewer id already extracted above instead of re-parsing
    # the validate response a second time.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
    """Import a pipeline, completely overwriting the existing pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to import into.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates.
        overwrite (bool): overwrite existing pipeline.

    Returns:
        dict: the response json
    """
    query_params = {'overwrite': overwrite}
    response = requests.post(url + '/' + pipeline_id + '/import',
                             params=query_params, headers=X_REQ_BY,
                             auth=auth, verify=verify_ssl, json=json_payload)
    # Surface the server's error body before raising, since it usually
    # explains why the import was rejected.
    if response.status_code != 200:
        logging.error('Import error response: ' + response.text)
    response.raise_for_status()
    logging.info('Pipeline import successful.')
    return response.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
    """Create a new pipeline from an exported payload.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    pipeline_config = json_payload['pipelineConfig']
    title = pipeline_config['title']
    description = pipeline_config['description']
    params = {'description': description, 'autoGeneratePipelineId': True}
    logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
    response = requests.put(url + '/' + title, params=params,
                            headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    create_json = response.json()
    logging.debug(create_json)
    logging.info('Pipeline creation successful.')
    return create_json
def system_info(url, auth, verify_ssl):
    """Retrieve SDC system information.

    Args:
        url (str): the host url.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    response.raise_for_status()
    return response.json()
def build_pipeline_url(host_url):
    """Return the host url extended with the pipeline REST API path."""
    return '{}/pipeline'.format(_base_url(host_url))
def build_system_url(host_url):
    """Return the host url extended with the system REST API path."""
    return '{}/system'.format(_base_url(host_url))
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | import_pipeline | python | def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json() | Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L194-L220 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it reports RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to start.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json
    """
    # Create a fresh dict per call instead of using a mutable default
    # argument, which would be shared across all calls.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl,
                                 json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline until it reaches the *target* status or the budget runs out.

    Args:
        target (str): the status to wait for, e.g. STATUS_RUNNING or STATUS_STOPPED.
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Raises:
        Exception: if the pipeline does not reach *target* within the poll budget.
    """
    status = ""
    remaining = POLL_ITERATIONS
    while status != target and remaining > 0:
        # Use logging instead of a bare print for diagnostics.
        logging.debug('Pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        # Decrement the budget so a stuck pipeline cannot loop forever
        # (the original counter was never decremented).
        remaining -= 1
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
    if status != target:
        # Raise a real exception object; raising a bare string is a TypeError
        # in Python 3. Total budget is iterations * seconds-per-poll.
        raise Exception('pipeline status timed out after {} seconds. Current status \'{}\''
                        .format(str(POLL_ITERATIONS * POLLING_SECONDS), status))
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result (including any issues).

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate',
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse the previewer id already extracted above instead of re-parsing
    # the validate response a second time.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    '''
    Check whether a pipeline with the given id exists on the SDC host.

    :param url: (str): the host url in the form 'http://host:port/'.
    :param pipeline_id: (string) the pipeline identifier
    :param auth: (tuple) a tuple of username, password
    :param verify_ssl: (bool) whether to verify ssl certificates
    :return: (boolean) True if the status lookup succeeds, False on an HTTP error
    '''
    try:
        # A successful status lookup means the pipeline exists. The result is
        # discarded; the original also subscripted ['status'], which could
        # raise an uncaught KeyError without adding any information.
        pipeline_status(url, pipeline_id, auth, verify_ssl)
        return True
    except requests.HTTPError:
        # The status endpoint answers 404 (an HTTPError after
        # raise_for_status) for unknown pipelines.
        return False
def create_pipeline(url, auth, json_payload, verify_ssl):
"""Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | create_pipeline | python | def create_pipeline(url, auth, json_payload, verify_ssl):
title = json_payload['pipelineConfig']['title']
description = json_payload['pipelineConfig']['description']
params = {'description':description, 'autoGeneratePipelineId':True}
logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)
put_result.raise_for_status()
create_json = put_result.json()
logging.debug(create_json)
logging.info('Pipeline creation successful.')
return create_json | Create a new pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json paylod as a dictionary.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L223-L245 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it reports RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to start.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json
    """
    # Create a fresh dict per call instead of using a mutable default
    # argument, which would be shared across all calls.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl,
                                 json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline until it reaches the *target* status or the budget runs out.

    Args:
        target (str): the status to wait for, e.g. STATUS_RUNNING or STATUS_STOPPED.
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Raises:
        Exception: if the pipeline does not reach *target* within the poll budget.
    """
    status = ""
    remaining = POLL_ITERATIONS
    while status != target and remaining > 0:
        # Use logging instead of a bare print for diagnostics.
        logging.debug('Pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        # Decrement the budget so a stuck pipeline cannot loop forever
        # (the original counter was never decremented).
        remaining -= 1
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
    if status != target:
        # Raise a real exception object; raising a bare string is a TypeError
        # in Python 3. Total budget is iterations * seconds-per-poll.
        raise Exception('pipeline status timed out after {} seconds. Current status \'{}\''
                        .format(str(POLL_ITERATIONS * POLLING_SECONDS), status))
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result (including any issues).

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate',
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse the previewer id already extracted above instead of re-parsing
    # the validate response a second time.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    '''
    Check whether a pipeline with the given id exists on the SDC host.

    :param url: (str): the host url in the form 'http://host:port/'.
    :param pipeline_id: (string) the pipeline identifier
    :param auth: (tuple) a tuple of username, password
    :param verify_ssl: (bool) whether to verify ssl certificates
    :return: (boolean) True if the status lookup succeeds, False on an HTTP error
    '''
    try:
        # A successful status lookup means the pipeline exists; an HTTP error
        # (e.g. 404) means it does not. NOTE(review): the ['status'] lookup is
        # discarded and a missing key would raise an uncaught KeyError.
        pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
        return True
    except requests.HTTPError:
        return False
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):
"""Import a pipeline.
This will completely overwrite the existing pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
json_payload (dict): the exported json payload as a dictionary.
overwrite (bool): overwrite existing pipeline
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
parameters = { 'overwrite' : overwrite }
import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,
headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)
if import_result.status_code != 200:
logging.error('Import error response: ' + import_result.text)
import_result.raise_for_status()
logging.info('Pipeline import successful.')
return import_result.json()
def system_info(url, auth, verify_ssl):
"""Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password.
"""
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json()
def build_pipeline_url(host_url):
"""Formats the url to include the path parts for the pipeline REST API."""
return _base_url(host_url) + '/pipeline'
def build_system_url(host_url):
"""Formats the url to include the path parts for the system REST API."""
return _base_url(host_url) + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/api.py | system_info | python | def system_info(url, auth, verify_ssl):
sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
sysinfo_response.raise_for_status()
return sysinfo_response.json() | Retrieve SDC system information.
Args:
url (str): the host url.
auth (tuple): a tuple of username, and password. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/api.py#L247-L257 | null | # Copyright 2017 phData Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import logging
import requests
import time
# required custom header for all SDC REST requests.
X_REQ_BY = {'X-Requested-By': 'pipeline-utils'}
POLLING_SECONDS = 1
POLL_ITERATIONS = 45
STATUS_STOPPED = 'STOPPED'
STATUS_RUNNING = 'RUNNING'
VALIDATING = 'VALIDATING'
def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters=None):
    """Start a pipeline and wait until it reports RUNNING.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to start.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.
        runtime_parameters (dict): the desired runtime parameters for the
            pipeline; defaults to an empty dict.

    Returns:
        dict: the response json
    """
    # Create a fresh dict per call instead of using a mutable default
    # argument, which would be shared across all calls.
    if runtime_parameters is None:
        runtime_parameters = {}
    start_result = requests.post(url + '/' + pipeline_id + '/start',
                                 headers=X_REQ_BY, auth=auth, verify=verify_ssl,
                                 json=runtime_parameters)
    start_result.raise_for_status()
    logging.info('Pipeline start requested.')
    poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)
    logging.info("Pipeline started.")
    return start_result.json()
def poll_pipeline_status(target, url, pipeline_id, auth, verify_ssl):
    """Poll a pipeline until it reaches the *target* status or the budget runs out.

    Args:
        target (str): the status to wait for, e.g. STATUS_RUNNING or STATUS_STOPPED.
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to poll.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Raises:
        Exception: if the pipeline does not reach *target* within the poll budget.
    """
    status = ""
    remaining = POLL_ITERATIONS
    while status != target and remaining > 0:
        # Use logging instead of a bare print for diagnostics.
        logging.debug('Pipeline status: %s', status)
        time.sleep(POLLING_SECONDS)
        # Decrement the budget so a stuck pipeline cannot loop forever
        # (the original counter was never decremented).
        remaining -= 1
        status = pipeline_status(url, pipeline_id, auth, verify_ssl)['status']
    if status != target:
        # Raise a real exception object; raising a bare string is a TypeError
        # in Python 3. Total budget is iterations * seconds-per-poll.
        raise Exception('pipeline status timed out after {} seconds. Current status \'{}\''
                        .format(str(POLL_ITERATIONS * POLLING_SECONDS), status))
def export_pipeline(url, pipeline_id, auth, verify_ssl):
"""Export the config and rules for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
if export_result.status_code == 404:
logging.error('Pipeline not found: ' + pipeline_id)
export_result.raise_for_status()
return export_result.json()
def pipeline_status(url, pipeline_id, auth, verify_ssl):
"""Retrieve the current status for a pipeline.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
status_result.raise_for_status()
logging.debug('Status request: ' + url + '/status')
logging.debug(status_result.json())
return status_result.json()
def preview_status(url, pipeline_id, previewer_id, auth, verify_ssl):
"""Retrieve the current status for a preview.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
previewer_id (str): the previewer id created by starting a preview or validation
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
preview_status = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id + "/status", headers=X_REQ_BY, auth=auth, verify=verify_ssl)
preview_status.raise_for_status()
logging.debug(preview_status.json())
return preview_status.json()
def poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl):
status = VALIDATING
while status == VALIDATING:
time.sleep(POLLING_SECONDS)
status = preview_status(url, pipeline_id, previewer_id, auth, verify_ssl)['status']
logging.debug('poll_validation status: {}'.format(status))
def stop_pipeline(url, pipeline_id, auth, verify_ssl):
"""Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.
Args:
url (str): the host url in the form 'http://host:port/'.
pipeline_id (str): the ID of of the exported pipeline.
auth (tuple): a tuple of username, and password.
verify_ssl (bool): whether to verify ssl certificates
Returns:
dict: the response json
"""
stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)
stop_result.raise_for_status()
logging.info("Pipeline stop requested.")
poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)
logging.info('Pipeline stopped.')
return stop_result.json()
def validate_pipeline(url, pipeline_id, auth, verify_ssl):
    """Validate a pipeline and return the preview result (including any issues).

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to validate.
        auth (tuple): a tuple of username, and password.
        verify_ssl (bool): whether to verify ssl certificates.

    Returns:
        dict: the response json
    """
    validate_result = requests.get(url + '/' + pipeline_id + '/validate',
                                   headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    validate_result.raise_for_status()
    previewer_id = validate_result.json()['previewerId']
    poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)
    # Reuse the previewer id already extracted above instead of re-parsing
    # the validate response a second time.
    preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + previewer_id,
                                  headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    logging.debug('result content: {}'.format(preview_result.content))
    return preview_result.json()
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    """Return True when a status lookup for the pipeline succeeds.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the pipeline identifier
        auth (tuple): a tuple of username, password
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        bool: True if the pipeline exists on the host, False otherwise.
    """
    try:
        status_json = pipeline_status(url, pipeline_id, auth, verify_ssl)
        status_json['status']
    except requests.HTTPError:
        # A 404 (or other HTTP error) means the pipeline cannot be found.
        return False
    return True
def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite=False):
    """Import a pipeline, completely overwriting the existing pipeline.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        pipeline_id (str): the ID of the pipeline to import into.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates
        overwrite (bool): overwrite existing pipeline

    Returns:
        dict: the response json
    """
    query_params = {'overwrite': overwrite}
    response = requests.post(url + '/' + pipeline_id + '/import',
                             params=query_params, headers=X_REQ_BY,
                             auth=auth, verify=verify_ssl, json=json_payload)
    # Log the server's error body before raising, to make failures debuggable.
    if response.status_code != 200:
        logging.error('Import error response: ' + response.text)
    response.raise_for_status()
    logging.info('Pipeline import successful.')
    return response.json()
def create_pipeline(url, auth, json_payload, verify_ssl):
    """Create a new pipeline from an exported payload.

    Args:
        url (str): the host url in the form 'http://host:port/'.
        auth (tuple): a tuple of username, and password.
        json_payload (dict): the exported json payload as a dictionary.
        verify_ssl (bool): whether to verify ssl certificates

    Returns:
        dict: the response json
    """
    pipeline_config = json_payload['pipelineConfig']
    title = pipeline_config['title']
    description = pipeline_config['description']
    params = {'description': description, 'autoGeneratePipelineId': True}
    logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)
    put_result = requests.put(url + '/' + title, params=params,
                              headers=X_REQ_BY, auth=auth, verify=verify_ssl)
    put_result.raise_for_status()
    create_json = put_result.json()
    logging.debug(create_json)
    logging.info('Pipeline creation successful.')
    return create_json
def build_pipeline_url(host_url):
    """Return *host_url* extended with the path of the pipeline REST API."""
    base = _base_url(host_url)
    return base + '/pipeline'
def build_system_url(host_url):
    """Return *host_url* extended with the path of the system REST API."""
    base = _base_url(host_url)
    return base + '/system'
def _base_url(host_url):
return re.sub(r'/$', '', host_url) + '/rest/v1'
|
phdata/sdc-api-tool | sdctool/commands.py | export_pipeline | python | def export_pipeline(conf, args):
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '') | Export a pipeline to json. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L7-L18 | [
"def export_pipeline(url, pipeline_id, auth, verify_ssl):\n \"\"\"Export the config and rules for a pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n if export_result.status_code == 404:\n logging.error('Pipeline not found: ' + pipeline_id)\n export_result.raise_for_status()\n return export_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | import_pipeline | python | def import_pipeline(conf, args):
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite) | Import a pipeline from json. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L20-L29 | [
"def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):\n \"\"\"Import a pipeline.\n\n This will completely overwrite the existing pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n json_payload (dict): the exported json payload as a dictionary.\n overwrite (bool): overwrite existing pipeline\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n parameters = { 'overwrite' : overwrite }\n import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,\n headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)\n\n if import_result.status_code != 200:\n logging.error('Import error response: ' + import_result.text)\n\n import_result.raise_for_status()\n logging.info('Pipeline import successful.')\n return import_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | promote_pipeline | python | def promote_pipeline(conf, args):
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result | Export a pipeline from a lower environment and import into higher environment. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L31-L57 | [
"def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}):\n \"\"\"Start a running pipeline. The API waits for the pipeline to be fully started.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n runtime_parameters (dict): the desired runtime parameters for the pipeline.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n \"\"\"\n start_result = requests.post(url + '/' + pipeline_id + '/start',\n headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)\n start_result.raise_for_status()\n logging.info('Pipeline start requested.')\n\n poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)\n\n logging.info(\"Pipeline started.\")\n return start_result.json()\n",
"def pipeline_status(url, pipeline_id, auth, verify_ssl):\n \"\"\"Retrieve the current status for a pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n status_result.raise_for_status()\n logging.debug('Status request: ' + url + '/status')\n logging.debug(status_result.json())\n return status_result.json()\n",
"def export_pipeline(url, pipeline_id, auth, verify_ssl):\n \"\"\"Export the config and rules for a pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n export_result = requests.get(url + '/' + pipeline_id + '/export', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n if export_result.status_code == 404:\n logging.error('Pipeline not found: ' + pipeline_id)\n export_result.raise_for_status()\n return export_result.json()\n",
"def stop_pipeline(url, pipeline_id, auth, verify_ssl):\n \"\"\"Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n\n stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n stop_result.raise_for_status()\n\n logging.info(\"Pipeline stop requested.\")\n\n poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)\n\n logging.info('Pipeline stopped.')\n return stop_result.json()\n",
"def import_pipeline(url, pipeline_id, auth, json_payload, verify_ssl, overwrite = False):\n \"\"\"Import a pipeline.\n\n This will completely overwrite the existing pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n json_payload (dict): the exported json payload as a dictionary.\n overwrite (bool): overwrite existing pipeline\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n parameters = { 'overwrite' : overwrite }\n import_result = requests.post(url + '/' + pipeline_id + '/import', params=parameters,\n headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=json_payload)\n\n if import_result.status_code != 200:\n logging.error('Import error response: ' + import_result.text)\n\n import_result.raise_for_status()\n logging.info('Pipeline import successful.')\n return import_result.json()\n",
"def create_pipeline(url, auth, json_payload, verify_ssl):\n \"\"\"Create a new pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n auth (tuple): a tuple of username, and password.\n json_payload (dict): the exported json paylod as a dictionary.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n title = json_payload['pipelineConfig']['title']\n description = json_payload['pipelineConfig']['description']\n params = {'description':description, 'autoGeneratePipelineId':True}\n logging.info('No destination pipeline ID provided. Creating a new pipeline: ' + title)\n put_result = requests.put(url + '/' + title, params=params, headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n put_result.raise_for_status()\n create_json = put_result.json()\n logging.debug(create_json)\n logging.info('Pipeline creation successful.')\n return create_json\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | start_pipeline | python | def start_pipeline(conf, args):
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result | Start a pipeline | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L59-L70 | [
"def start_pipeline(url, pipeline_id, auth, verify_ssl, runtime_parameters={}):\n \"\"\"Start a running pipeline. The API waits for the pipeline to be fully started.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n runtime_parameters (dict): the desired runtime parameters for the pipeline.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n \"\"\"\n start_result = requests.post(url + '/' + pipeline_id + '/start',\n headers=X_REQ_BY, auth=auth, verify=verify_ssl, json=runtime_parameters)\n start_result.raise_for_status()\n logging.info('Pipeline start requested.')\n\n poll_pipeline_status(STATUS_RUNNING, url, pipeline_id, auth, verify_ssl)\n\n logging.info(\"Pipeline started.\")\n return start_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
    """Return the current status of a pipeline.

    (The previous docstring said "Stop a pipeline", a copy/paste error:
    this command only queries status and has no side effects.)

    Args:
        conf: loaded configuration holding instance settings and credentials.
        args: parsed CLI arguments; uses ``args.host_instance`` and
            ``args.pipeline_id``.

    Returns:
        The ``'status'`` field of the status JSON reported by the SDC REST API.
    """
    host = conf.config['instances'][args.host_instance]
    url = api.build_pipeline_url(build_instance_url(host))
    auth = tuple([conf.creds['instances'][args.host_instance]['user'],
                  conf.creds['instances'][args.host_instance]['pass']])
    verify_ssl = host.get('verify_ssl', True)
    status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
    return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | stop_pipeline | python | def stop_pipeline(conf, args):
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result | Stop a pipeline. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L72-L80 | [
"def stop_pipeline(url, pipeline_id, auth, verify_ssl):\n \"\"\"Stop a running pipeline. The API waits for the pipeline to be 'STOPPED' before returning.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n\n stop_result = requests.post(url + '/' + pipeline_id + '/stop', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n stop_result.raise_for_status()\n\n logging.info(\"Pipeline stop requested.\")\n\n poll_pipeline_status(STATUS_STOPPED, url, pipeline_id, auth, verify_ssl)\n\n logging.info('Pipeline stopped.')\n return stop_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
    """Compose the base URL (``protocol://host:port``) for an SDC instance."""
    protocol = instance_conf['protocol']
    host = instance_conf['host']
    port = instance_conf['port']
    return '{}://{}:{}'.format(protocol, host, port)
def export_pipeline(conf, args):
    """Export a pipeline definition and write it to a local JSON file."""
    # Resolve the source instance and its credentials.
    instance = args.src_instance
    src = conf.config['instances'][instance]
    creds = conf.creds['instances'][instance]
    src_url = api.build_pipeline_url(build_instance_url(src))
    export_json = api.export_pipeline(
        src_url, args.src_pipeline_id,
        (creds['user'], creds['pass']),
        src.get('verify_ssl', True))
    # Persist the exported definition as pretty-printed JSON.
    with open(args.out, 'w') as out_file:
        json.dump(export_json, out_file, indent=4, sort_keys=False)
    return (0, '')
def import_pipeline(conf, args):
    """Import a pipeline definition from a JSON file into an SDC instance."""
    instance = args.dst_instance
    dst = conf.config['instances'][instance]
    creds = conf.creds['instances'][instance]
    dst_url = api.build_pipeline_url(build_instance_url(dst))
    auth = (creds['user'], creds['pass'])
    # Read the whole definition before contacting the destination.
    with open(args.pipeline_json) as pipeline_file:
        parsed_json = json.load(pipeline_file)
    return api.import_pipeline(dst_url, args.pipeline_id, auth, parsed_json,
                               dst.get('verify_ssl', True), overwrite=args.overwrite)
def promote_pipeline(conf, args):
    """Export a pipeline from a lower environment and import it into a higher one.

    Args:
        conf: loaded configuration holding instance settings and credentials.
        args: parsed CLI arguments; uses ``src_instance``, ``src_pipeline_id``,
            ``dest_instance``, ``dest_pipeline_id`` and ``start_dest``.

    Returns:
        dict: the import-result JSON from the destination instance.
    """
    src = conf.config['instances'][args.src_instance]
    src_url = api.build_pipeline_url(build_instance_url(src))
    src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
                      conf.creds['instances'][args.src_instance]['pass']])
    src_verify_ssl = src.get('verify_ssl', True)
    export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, src_verify_ssl)
    # Import the pipeline to the destination
    dest = conf.config['instances'][args.dest_instance]
    dest_url = api.build_pipeline_url(build_instance_url(dest))
    dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'],
                       conf.creds['instances'][args.dest_instance]['pass']])
    # BUG FIX: destination calls previously reused the *source* instance's
    # verify_ssl setting; honor the destination's own configuration.
    dest_verify_ssl = dest.get('verify_ssl', True)
    dest_pipeline_id = args.dest_pipeline_id
    if dest_pipeline_id:
        # BUG FIX: an existing-but-already-stopped destination pipeline used to
        # fall through to the "create new pipeline" branch (the condition was
        # `if dest_pipeline_id and status != STOPPED ... else create`). Only
        # create when no destination pipeline id was provided.
        if api.pipeline_status(dest_url, dest_pipeline_id, dest_auth,
                               dest_verify_ssl)['status'] != api.STATUS_STOPPED:
            api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, dest_verify_ssl)
    else:
        # No destination pipeline id was provided, must be a new pipeline.
        create_json = api.create_pipeline(dest_url, dest_auth, export_json, dest_verify_ssl)
        dest_pipeline_id = create_json['info']['pipelineId']
    result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json,
                                 dest_verify_ssl, overwrite=True)
    # Start the imported pipeline
    if args.start_dest:
        api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, dest_verify_ssl)
    return result
def start_pipeline(conf, args):
    """Start a pipeline, optionally passing runtime parameters as JSON."""
    instance = args.host_instance
    host = conf.config['instances'][instance]
    creds = conf.creds['instances'][instance]
    url = api.build_pipeline_url(build_instance_url(host))
    auth = (creds['user'], creds['pass'])
    verify_ssl = host.get('verify_ssl', True)
    # Runtime parameters arrive as a JSON string on the CLI; default to none.
    runtime_parameters = json.loads(args.runtime_parameters) if args.runtime_parameters else {}
    return api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | system_info | python | def system_info(conf, args):
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json | Retrieve SDC system information. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L82-L90
"def system_info(url, auth, verify_ssl):\n \"\"\"Retrieve SDC system information.\n\n Args:\n url (str): the host url.\n auth (tuple): a tuple of username, and password.\n\n \"\"\"\n sysinfo_response = requests.get(url + '/info', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n sysinfo_response.raise_for_status()\n return sysinfo_response.json()\n",
"def build_system_url(host_url):\n \"\"\"Formats the url to include the path parts for the system REST API.\"\"\"\n return _base_url(host_url) + '/system'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | validate_pipeline | python | def validate_pipeline(conf, args):
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result | Validate a pipeline configuration. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L92-L100 | [
"def validate_pipeline(url, pipeline_id, auth, verify_ssl):\n \"\"\"Validate a pipeline and show issues.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n\n validate_result = requests.get(url + '/' + pipeline_id + '/validate', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n validate_result.raise_for_status()\n previewer_id = validate_result.json()['previewerId']\n poll_validation_status(url, pipeline_id, previewer_id, auth, verify_ssl)\n\n preview_result = requests.get(url + '/' + pipeline_id + '/preview/' + validate_result.json()['previewerId'], headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n logging.debug('result content: {}'.format(preview_result.content))\n\n return preview_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def status_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status']
|
phdata/sdc-api-tool | sdctool/commands.py | status_pipeline | python | def status_pipeline(conf, args):
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
status_result = api.pipeline_status(url, args.pipeline_id, auth, verify_ssl)
return status_result['status'] | Stop a pipeline. | train | https://github.com/phdata/sdc-api-tool/blob/8c86cfa89773ad411226264293d5b574194045de/sdctool/commands.py#L102-L110 | [
"def pipeline_status(url, pipeline_id, auth, verify_ssl):\n \"\"\"Retrieve the current status for a pipeline.\n\n Args:\n url (str): the host url in the form 'http://host:port/'.\n pipeline_id (str): the ID of of the exported pipeline.\n auth (tuple): a tuple of username, and password.\n verify_ssl (bool): whether to verify ssl certificates\n\n Returns:\n dict: the response json\n\n \"\"\"\n status_result = requests.get(url + '/' + pipeline_id + '/status', headers=X_REQ_BY, auth=auth, verify=verify_ssl)\n status_result.raise_for_status()\n logging.debug('Status request: ' + url + '/status')\n logging.debug(status_result.json())\n return status_result.json()\n",
"def build_pipeline_url(host_url):\n \"\"\"Formats the url to include the path parts for the pipeline REST API.\"\"\"\n return _base_url(host_url) + '/pipeline'\n",
"def build_instance_url(instance_conf):\n return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])\n"
] | from . import api
import json
def build_instance_url(instance_conf):
return instance_conf['protocol'] + '://' + instance_conf['host'] + ':' + str(instance_conf['port'])
def export_pipeline(conf, args):
"""Export a pipeline to json."""
# Export the source pipeline and save it to file
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'],
conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
with open(args.out, 'w') as outFile:
outFile.write(json.dumps(export_json, indent=4, sort_keys=False))
return (0, '')
def import_pipeline(conf, args):
"""Import a pipeline from json."""
with open(args.pipeline_json) as pipeline_json:
dst = conf.config['instances'][args.dst_instance]
dst_url = api.build_pipeline_url(build_instance_url(dst))
dst_auth = tuple([conf.creds['instances'][args.dst_instance]['user'],
conf.creds['instances'][args.dst_instance]['pass']])
parsed_json = json.load(pipeline_json)
verify_ssl = dst.get('verify_ssl', True)
return api.import_pipeline(dst_url, args.pipeline_id, dst_auth, parsed_json, verify_ssl, overwrite=args.overwrite)
def promote_pipeline(conf, args):
"""Export a pipeline from a lower environment and import into higher environment."""
src = conf.config['instances'][args.src_instance]
src_url = api.build_pipeline_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src_instance]['user'], conf.creds['instances'][args.src_instance]['pass']])
verify_ssl = src.get('verify_ssl', True)
export_json = api.export_pipeline(src_url, args.src_pipeline_id, src_auth, verify_ssl)
# Import the pipeline to the destination
dest = conf.config['instances'][args.dest_instance]
dest_url = api.build_pipeline_url(build_instance_url(dest))
dest_auth = tuple([conf.creds['instances'][args.dest_instance]['user'], conf.creds['instances'][args.dest_instance]['pass']])
dest_pipeline_id = args.dest_pipeline_id
if dest_pipeline_id and api.pipeline_status(dest_url, dest_pipeline_id, dest_auth, verify_ssl)['status'] != api.STATUS_STOPPED:
api.stop_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
else:
# No destination pipeline id was provided, must be a new pipeline.
create_json = api.create_pipeline(dest_url, dest_auth, export_json, verify_ssl)
dest_pipeline_id = create_json['info']['pipelineId']
result = api.import_pipeline(dest_url, dest_pipeline_id, dest_auth, export_json, verify_ssl, overwrite=True)
# Start the imported pipeline
if args.start_dest:
api.start_pipeline(dest_url, dest_pipeline_id, dest_auth, verify_ssl)
return result
def start_pipeline(conf, args):
"""Start a pipeline"""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
runtime_parameters = {}
verify_ssl = host.get('verify_ssl', True)
if args.runtime_parameters:
runtime_parameters = json.loads(args.runtime_parameters)
start_result = api.start_pipeline(url, args.pipeline_id, auth, verify_ssl, runtime_parameters)
return start_result
def stop_pipeline(conf, args):
"""Stop a pipeline."""
host = conf.config['instances'][args.host_instance]
url = api.build_pipeline_url(build_instance_url(host))
auth = tuple([conf.creds['instances'][args.host_instance]['user'], conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
stop_result = api.stop_pipeline(url, args.pipeline_id, auth, verify_ssl)
return stop_result
def system_info(conf, args):
"""Retieve SDC system information."""
src = conf.config['instances'][args.src]
src_url = api.build_system_url(build_instance_url(src))
src_auth = tuple([conf.creds['instances'][args.src]['user'],
conf.creds['instances'][args.src]['pass']])
verify_ssl = src.get('verify_ssl', True)
sysinfo_json = api.system_info(src_url, src_auth, verify_ssl)
return sysinfo_json
def validate_pipeline(conf, args):
"""Validate a pipeline configuration."""
host = conf.config['instances'][args.host_instance]
host_url = api.build_pipeline_url(build_instance_url(host))
host_auth = tuple([conf.creds['instances'][args.host_instance]['user'],
conf.creds['instances'][args.host_instance]['pass']])
verify_ssl = host.get('verify_ssl', True)
validate_result = api.validate_pipeline(host_url, args.pipeline_id, host_auth, verify_ssl)
return validate_result
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder.issue | python | def issue(self, issuance_spec, metadata, fees):
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
) | Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L44-L64 | [
"def _collect_uncolored_outputs(unspent_outputs, amount):\n \"\"\"\n Returns a list of uncolored outputs for the specified amount.\n\n :param list[SpendableOutput] unspent_outputs: The list of available outputs.\n :param int amount: The amount to collect.\n :return: A list of outputs, and the total amount collected.\n :rtype: (list[SpendableOutput], int)\n \"\"\"\n total_amount = 0\n result = []\n for output in unspent_outputs:\n if output.output.asset_id is None:\n result.append(output)\n total_amount += output.output.value\n\n if total_amount >= amount:\n return result, total_amount\n\n raise InsufficientFundsError\n",
"def _get_uncolored_output(self, script, value):\n \"\"\"\n Creates an uncolored output.\n\n :param bytes script: The output script.\n :param int value: The satoshi value of the output.\n :return: An object representing the uncolored output.\n :rtype: TransactionOutput\n \"\"\"\n if value < self._dust_amount:\n raise DustOutputError\n\n return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))\n",
"def _get_colored_output(self, script):\n \"\"\"\n Creates a colored output.\n\n :param bytes script: The output script.\n :return: An object representing the colored output.\n :rtype: TransactionOutput\n \"\"\"\n return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))\n",
"def _get_marker_output(self, asset_quantities, metadata):\n \"\"\"\n Creates a marker output.\n\n :param list[int] asset_quantities: The asset quantity list.\n :param bytes metadata: The metadata contained in the output.\n :return: An object representing the marker output.\n :rtype: TransactionOutput\n \"\"\"\n payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()\n script = openassets.protocol.MarkerOutput.build_script(payload)\n return bitcoin.core.CTxOut(0, script)\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
    """
    Creates a transaction for sending assets and bitcoins.

    :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
        - The first element is the ID of an asset.
        - The second element is the parameters of the transfer.
    :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
    :param int fees: The fees to include in the transaction.
    :return: An unsigned transaction for sending assets and bitcoins.
    :rtype: CTransaction
    """
    inputs = []
    outputs = []
    asset_quantities = []
    # For each asset leg: gather enough colored inputs, emit the recipient's
    # colored output, and a colored change output when inputs overshoot.
    for asset_id, transfer_spec in asset_transfer_specs:
        colored_outputs, collected_amount = self._collect_colored_outputs(
            transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
        inputs.extend(colored_outputs)
        outputs.append(self._get_colored_output(transfer_spec.to_script))
        asset_quantities.append(transfer_spec.amount)
        if collected_amount > transfer_spec.amount:
            outputs.append(self._get_colored_output(transfer_spec.change_script))
            asset_quantities.append(collected_amount - transfer_spec.amount)
    # Satoshis already supplied by the selected inputs minus what the
    # outputs emitted so far consume.
    btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
    if btc_excess < btc_transfer_spec.amount + fees:
        # Not enough bitcoin inputs
        uncolored_outputs, total_amount = self._collect_uncolored_outputs(
            btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
        inputs.extend(uncolored_outputs)
        btc_excess += total_amount
    change = btc_excess - btc_transfer_spec.amount - fees
    if change > 0:
        # Too much bitcoin in input, send it back as change
        outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
    if btc_transfer_spec.amount > 0:
        outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
    # NOTE(review): the marker output is inserted at index 0 so the
    # asset_quantities list precedes the colored outputs it describes —
    # presumably the ordering the Open Assets protocol expects; confirm
    # against the protocol spec before reordering outputs.
    if asset_quantities:
        outputs.insert(0, self._get_marker_output(asset_quantities, b''))
    return bitcoin.core.CTransaction(
        vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
        vout=outputs
    )
def transfer_bitcoin(self, transfer_spec, fees):
    """
    Builds an unsigned transaction that moves bitcoins only (no assets).

    :param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
    :param int fees: The fees to include in the transaction.
    :return: The resulting unsigned transaction.
    :rtype: CTransaction
    """
    # A pure bitcoin transfer is just a transfer with an empty asset list.
    return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
    """
    Builds an unsigned transaction that sends a single asset.

    :param bytes asset_id: The ID of the asset being sent.
    :param TransferParameters transfer_spec: The parameters of the asset being transferred.
    :param bytes btc_change_script: The script where to send bitcoin change, if any.
    :param int fees: The fees to include in the transaction.
    :return: The resulting unsigned transaction.
    :rtype: CTransaction
    """
    # No explicit bitcoin amount is sent; the asset leg's outputs fund fees.
    btc_leg = TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0)
    return self.transfer([(asset_id, transfer_spec)], btc_leg, fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
    """
    Builds an unsigned transaction swapping an asset against bitcoins.

    :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
    :param bytes asset_id: The ID of the asset being sent.
    :param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
    :param int fees: The fees to include in the transaction.
    :return: The resulting unsigned transaction.
    :rtype: CTransaction
    """
    swap_legs = [(asset_id, asset_transfer_spec)]
    return self.transfer(swap_legs, btc_transfer_spec, fees)
def asset_asset_swap(
        self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
    """
    Creates a transaction for swapping an asset for another asset.

    :param bytes asset1_id: The ID of the first asset.
    :param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
        It is also used for paying fees and/or receiving change if any.
    :param bytes asset2_id: The ID of the second asset.
    :param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
    :param int fees: The fees to include in the transaction.
    :return: The resulting unsigned transaction.
    :rtype: CTransaction
    """
    # Party 1's outputs fund the fees; bitcoin change (if any) goes back to
    # party 1, and no bitcoin payment amount is exchanged directly.
    bitcoin_leg = TransferParameters(
        asset1_transfer_spec.unspent_outputs,
        asset1_transfer_spec.to_script,
        asset1_transfer_spec.change_script,
        0)
    asset_legs = [(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)]
    return self.transfer(asset_legs, bitcoin_leg, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
    """
    Creates an uncolored output.

    :param bytes script: The output script.
    :param int value: The satoshi value of the output.
    :return: An object representing the uncolored output.
    :rtype: TransactionOutput
    :raises DustOutputError: If the value is below the dust threshold.
    """
    # Reject outputs that fall under the configured dust threshold.
    if value < self._dust_amount:
        raise DustOutputError
    return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
    """
    Creates a colored output.

    :param bytes script: The output script.
    :return: An object representing the colored output.
    :rtype: TransactionOutput
    """
    # Colored outputs carry the minimum allowed value (the dust amount).
    return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
    """
    Creates a marker output.

    :param list[int] asset_quantities: The asset quantity list.
    :param bytes metadata: The metadata contained in the output.
    :return: An object representing the marker output.
    :rtype: TransactionOutput
    """
    marker = openassets.protocol.MarkerOutput(asset_quantities, metadata)
    output_script = openassets.protocol.MarkerOutput.build_script(marker.serialize_payload())
    # The marker output carries no bitcoin value.
    return bitcoin.core.CTxOut(0, output_script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder.transfer | python | def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
) | Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L66-L115 | [
"def _collect_uncolored_outputs(unspent_outputs, amount):\n \"\"\"\n Returns a list of uncolored outputs for the specified amount.\n\n :param list[SpendableOutput] unspent_outputs: The list of available outputs.\n :param int amount: The amount to collect.\n :return: A list of outputs, and the total amount collected.\n :rtype: (list[SpendableOutput], int)\n \"\"\"\n total_amount = 0\n result = []\n for output in unspent_outputs:\n if output.output.asset_id is None:\n result.append(output)\n total_amount += output.output.value\n\n if total_amount >= amount:\n return result, total_amount\n\n raise InsufficientFundsError\n",
"def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):\n \"\"\"\n Returns a list of colored outputs for the specified quantity.\n\n :param list[SpendableOutput] unspent_outputs: The list of available outputs.\n :param bytes asset_id: The ID of the asset to collect.\n :param int asset_quantity: The asset quantity to collect.\n :return: A list of outputs, and the total asset quantity collected.\n :rtype: (list[SpendableOutput], int)\n \"\"\"\n total_amount = 0\n result = []\n for output in unspent_outputs:\n if output.output.asset_id == asset_id:\n result.append(output)\n total_amount += output.output.asset_quantity\n\n if total_amount >= asset_quantity:\n return result, total_amount\n\n raise InsufficientAssetQuantityError\n",
"def _get_uncolored_output(self, script, value):\n \"\"\"\n Creates an uncolored output.\n\n :param bytes script: The output script.\n :param int value: The satoshi value of the output.\n :return: An object representing the uncolored output.\n :rtype: TransactionOutput\n \"\"\"\n if value < self._dust_amount:\n raise DustOutputError\n\n return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))\n",
"def _get_colored_output(self, script):\n \"\"\"\n Creates a colored output.\n\n :param bytes script: The output script.\n :return: An object representing the colored output.\n :rtype: TransactionOutput\n \"\"\"\n return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))\n",
"def _get_marker_output(self, asset_quantities, metadata):\n \"\"\"\n Creates a marker output.\n\n :param list[int] asset_quantities: The asset quantity list.\n :param bytes metadata: The metadata contained in the output.\n :return: An object representing the marker output.\n :rtype: TransactionOutput\n \"\"\"\n payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()\n script = openassets.protocol.MarkerOutput.build_script(payload)\n return bitcoin.core.CTxOut(0, script)\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
    """
    Creates a transaction for issuing an asset.

    :param TransferParameters issuance_spec: The parameters of the issuance.
    :param bytes metadata: The metadata to be embedded in the transaction.
    :param int fees: The fees to include in the transaction.
    :return: An unsigned transaction for issuing an asset.
    :rtype: CTransaction
    """
    # Funds required: one dust-valued issuance output, one dust floor kept
    # for the change output, plus the fees.
    required = 2 * self._dust_amount + fees
    inputs, total_amount = self._collect_uncolored_outputs(
        issuance_spec.unspent_outputs, required)
    outputs = [
        self._get_colored_output(issuance_spec.to_script),
        self._get_marker_output([issuance_spec.amount], metadata),
        self._get_uncolored_output(
            issuance_spec.change_script, total_amount - self._dust_amount - fees),
    ]
    return bitcoin.core.CTransaction(
        vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
        vout=outputs
    )
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder.transfer_assets | python | def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees) | Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L128-L142 | [
"def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):\n \"\"\"\n Creates a transaction for sending assets and bitcoins.\n\n :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:\n - The first element is the ID of an asset.\n - The second element is the parameters of the transfer.\n :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.\n :param int fees: The fees to include in the transaction.\n :return: An unsigned transaction for sending assets and bitcoins.\n :rtype: CTransaction\n \"\"\"\n inputs = []\n outputs = []\n asset_quantities = []\n for asset_id, transfer_spec in asset_transfer_specs:\n colored_outputs, collected_amount = self._collect_colored_outputs(\n transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)\n inputs.extend(colored_outputs)\n outputs.append(self._get_colored_output(transfer_spec.to_script))\n asset_quantities.append(transfer_spec.amount)\n\n if collected_amount > transfer_spec.amount:\n outputs.append(self._get_colored_output(transfer_spec.change_script))\n asset_quantities.append(collected_amount - transfer_spec.amount)\n\n btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])\n\n if btc_excess < btc_transfer_spec.amount + fees:\n # Not enough bitcoin inputs\n uncolored_outputs, total_amount = self._collect_uncolored_outputs(\n btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)\n inputs.extend(uncolored_outputs)\n btc_excess += total_amount\n\n change = btc_excess - btc_transfer_spec.amount - fees\n if change > 0:\n # Too much bitcoin in input, send it back as change\n outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))\n\n if btc_transfer_spec.amount > 0:\n outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))\n\n if asset_quantities:\n outputs.insert(0, 
self._get_marker_output(asset_quantities, b''))\n\n return bitcoin.core.CTransaction(\n vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],\n vout=outputs\n )\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
    """
    Creates a transaction for sending assets and bitcoins.

    :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
        - The first element is the ID of an asset.
        - The second element is the parameters of the transfer.
    :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
    :param int fees: The fees to include in the transaction.
    :return: An unsigned transaction for sending assets and bitcoins.
    :rtype: CTransaction
    """
    inputs = []
    outputs = []
    asset_quantities = []
    for asset_id, transfer_spec in asset_transfer_specs:
        colored_outputs, collected_amount = self._collect_colored_outputs(
            transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
        inputs.extend(colored_outputs)
        outputs.append(self._get_colored_output(transfer_spec.to_script))
        asset_quantities.append(transfer_spec.amount)

        if collected_amount > transfer_spec.amount:
            # Return the surplus asset quantity to the change script.
            outputs.append(self._get_colored_output(transfer_spec.change_script))
            asset_quantities.append(collected_amount - transfer_spec.amount)

    # Net bitcoin value already supplied by the colored inputs, after the
    # dust carried by the colored outputs created so far.
    # (Loop variables renamed: the original shadowed the 'input' builtin.)
    btc_excess = (sum(item.output.value for item in inputs)
                  - sum(item.nValue for item in outputs))

    if btc_excess < btc_transfer_spec.amount + fees:
        # Not enough bitcoin inputs
        uncolored_outputs, total_amount = self._collect_uncolored_outputs(
            btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
        inputs.extend(uncolored_outputs)
        btc_excess += total_amount

    change = btc_excess - btc_transfer_spec.amount - fees
    if change > 0:
        # Too much bitcoin in input, send it back as change
        outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))

    if btc_transfer_spec.amount > 0:
        outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))

    if asset_quantities:
        # Prepend the marker output describing the asset quantity list.
        outputs.insert(0, self._get_marker_output(asset_quantities, b''))

    return bitcoin.core.CTransaction(
        vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
        vout=outputs
    )
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder.btc_asset_swap | python | def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees) | Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L144-L155 | [
"def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):\n \"\"\"\n Creates a transaction for sending assets and bitcoins.\n\n :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:\n - The first element is the ID of an asset.\n - The second element is the parameters of the transfer.\n :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.\n :param int fees: The fees to include in the transaction.\n :return: An unsigned transaction for sending assets and bitcoins.\n :rtype: CTransaction\n \"\"\"\n inputs = []\n outputs = []\n asset_quantities = []\n for asset_id, transfer_spec in asset_transfer_specs:\n colored_outputs, collected_amount = self._collect_colored_outputs(\n transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)\n inputs.extend(colored_outputs)\n outputs.append(self._get_colored_output(transfer_spec.to_script))\n asset_quantities.append(transfer_spec.amount)\n\n if collected_amount > transfer_spec.amount:\n outputs.append(self._get_colored_output(transfer_spec.change_script))\n asset_quantities.append(collected_amount - transfer_spec.amount)\n\n btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])\n\n if btc_excess < btc_transfer_spec.amount + fees:\n # Not enough bitcoin inputs\n uncolored_outputs, total_amount = self._collect_uncolored_outputs(\n btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)\n inputs.extend(uncolored_outputs)\n btc_excess += total_amount\n\n change = btc_excess - btc_transfer_spec.amount - fees\n if change > 0:\n # Too much bitcoin in input, send it back as change\n outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))\n\n if btc_transfer_spec.amount > 0:\n outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))\n\n if asset_quantities:\n outputs.insert(0, 
self._get_marker_output(asset_quantities, b''))\n\n return bitcoin.core.CTransaction(\n vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],\n vout=outputs\n )\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder.asset_asset_swap | python | def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees) | Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L157-L175 | [
"def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):\n \"\"\"\n Creates a transaction for sending assets and bitcoins.\n\n :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:\n - The first element is the ID of an asset.\n - The second element is the parameters of the transfer.\n :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.\n :param int fees: The fees to include in the transaction.\n :return: An unsigned transaction for sending assets and bitcoins.\n :rtype: CTransaction\n \"\"\"\n inputs = []\n outputs = []\n asset_quantities = []\n for asset_id, transfer_spec in asset_transfer_specs:\n colored_outputs, collected_amount = self._collect_colored_outputs(\n transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)\n inputs.extend(colored_outputs)\n outputs.append(self._get_colored_output(transfer_spec.to_script))\n asset_quantities.append(transfer_spec.amount)\n\n if collected_amount > transfer_spec.amount:\n outputs.append(self._get_colored_output(transfer_spec.change_script))\n asset_quantities.append(collected_amount - transfer_spec.amount)\n\n btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])\n\n if btc_excess < btc_transfer_spec.amount + fees:\n # Not enough bitcoin inputs\n uncolored_outputs, total_amount = self._collect_uncolored_outputs(\n btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)\n inputs.extend(uncolored_outputs)\n btc_excess += total_amount\n\n change = btc_excess - btc_transfer_spec.amount - fees\n if change > 0:\n # Too much bitcoin in input, send it back as change\n outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))\n\n if btc_transfer_spec.amount > 0:\n outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))\n\n if asset_quantities:\n outputs.insert(0, 
self._get_marker_output(asset_quantities, b''))\n\n return bitcoin.core.CTransaction(\n vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],\n vout=outputs\n )\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder._collect_uncolored_outputs | python | def _collect_uncolored_outputs(unspent_outputs, amount):
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError | Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int) | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L178-L197 | null | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder._collect_colored_outputs | python | def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError | Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int) | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L200-L220 | null | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
OpenAssets/openassets | openassets/transactions.py | TransactionBuilder._get_uncolored_output | python | def _get_uncolored_output(self, script, value):
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script)) | Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L222-L234 | null | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
def _get_colored_output(self, script):
    """
    Creates a colored output.

    :param bytes script: The output script.
    :return: An object representing the colored output.
    :rtype: TransactionOutput
    """
    # Colored outputs carry the minimum (dust) bitcoin amount.
    return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L236-L244 | null | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_marker_output(self, asset_quantities, metadata):
"""
Creates a marker output.
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput
"""
payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
script = openassets.protocol.MarkerOutput.build_script(payload)
return bitcoin.core.CTxOut(0, script)
|
def _get_marker_output(self, asset_quantities, metadata):
    """
    Creates a marker output.

    :param list[int] asset_quantities: The asset quantity list.
    :param bytes metadata: The metadata contained in the output.
    :return: An object representing the marker output.
    :rtype: TransactionOutput
    """
    # Serialize the Open Assets payload and wrap it in an OP_RETURN script.
    payload = openassets.protocol.MarkerOutput(asset_quantities, metadata).serialize_payload()
    script = openassets.protocol.MarkerOutput.build_script(payload)
    # Marker outputs carry no bitcoin value.
    return bitcoin.core.CTxOut(0, script)
:param list[int] asset_quantities: The asset quantity list.
:param bytes metadata: The metadata contained in the output.
:return: An object representing the marker output.
:rtype: TransactionOutput | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L246-L257 | [
"def serialize_payload(self):\n \"\"\"\n Serializes the marker output data into a payload buffer.\n\n :return: The serialized payload.\n :rtype: bytes\n \"\"\"\n with io.BytesIO() as stream:\n stream.write(self.OPEN_ASSETS_TAG)\n\n bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)\n for asset_quantity in self.asset_quantities:\n stream.write(self.leb128_encode(asset_quantity))\n\n bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)\n\n stream.write(self.metadata)\n\n return stream.getvalue()\n",
"def build_script(data):\n \"\"\"\n Creates an output script containing an OP_RETURN and a PUSHDATA.\n\n :param bytes data: The content of the PUSHDATA.\n :return: The final script.\n :rtype: CScript\n \"\"\"\n return bitcoin.core.script.CScript(\n bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))\n"
] | class TransactionBuilder(object):
"""Provides methods for constructing Open Assets transactions."""
def __init__(self, dust_amount):
"""
Initializes a new instance of the TransactionBuilder class.
:param int dust_amount: The minimum allowed output value.
"""
self._dust_amount = dust_amount
def issue(self, issuance_spec, metadata, fees):
"""
Creates a transaction for issuing an asset.
:param TransferParameters issuance_spec: The parameters of the issuance.
:param bytes metadata: The metadata to be embedded in the transaction.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for issuing an asset.
:rtype: CTransaction
"""
inputs, total_amount = self._collect_uncolored_outputs(
issuance_spec.unspent_outputs, 2 * self._dust_amount + fees)
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=[
self._get_colored_output(issuance_spec.to_script),
self._get_marker_output([issuance_spec.amount], metadata),
self._get_uncolored_output(issuance_spec.change_script, total_amount - self._dust_amount - fees)
]
)
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees):
"""
Creates a transaction for sending assets and bitcoins.
:param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple:
- The first element is the ID of an asset.
- The second element is the parameters of the transfer.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: An unsigned transaction for sending assets and bitcoins.
:rtype: CTransaction
"""
inputs = []
outputs = []
asset_quantities = []
for asset_id, transfer_spec in asset_transfer_specs:
colored_outputs, collected_amount = self._collect_colored_outputs(
transfer_spec.unspent_outputs, asset_id, transfer_spec.amount)
inputs.extend(colored_outputs)
outputs.append(self._get_colored_output(transfer_spec.to_script))
asset_quantities.append(transfer_spec.amount)
if collected_amount > transfer_spec.amount:
outputs.append(self._get_colored_output(transfer_spec.change_script))
asset_quantities.append(collected_amount - transfer_spec.amount)
btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs])
if btc_excess < btc_transfer_spec.amount + fees:
# Not enough bitcoin inputs
uncolored_outputs, total_amount = self._collect_uncolored_outputs(
btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess)
inputs.extend(uncolored_outputs)
btc_excess += total_amount
change = btc_excess - btc_transfer_spec.amount - fees
if change > 0:
# Too much bitcoin in input, send it back as change
outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change))
if btc_transfer_spec.amount > 0:
outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount))
if asset_quantities:
outputs.insert(0, self._get_marker_output(asset_quantities, b''))
return bitcoin.core.CTransaction(
vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs],
vout=outputs
)
def transfer_bitcoin(self, transfer_spec, fees):
"""
Creates a transaction for sending bitcoins.
:param TransferParameters transfer_spec: The parameters of the bitcoins being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([], transfer_spec, fees)
def transfer_assets(self, asset_id, transfer_spec, btc_change_script, fees):
"""
Creates a transaction for sending an asset.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters transfer_spec: The parameters of the asset being transferred.
:param bytes btc_change_script: The script where to send bitcoin change, if any.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer(
[(asset_id, transfer_spec)],
TransferParameters(transfer_spec.unspent_outputs, None, btc_change_script, 0),
fees)
def btc_asset_swap(self, btc_transfer_spec, asset_id, asset_transfer_spec, fees):
"""
Creates a transaction for swapping assets for bitcoins.
:param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred.
:param bytes asset_id: The ID of the asset being sent.
:param TransferParameters asset_transfer_spec: The parameters of the asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
return self.transfer([(asset_id, asset_transfer_spec)], btc_transfer_spec, fees)
def asset_asset_swap(
self, asset1_id, asset1_transfer_spec, asset2_id, asset2_transfer_spec, fees):
"""
Creates a transaction for swapping an asset for another asset.
:param bytes asset1_id: The ID of the first asset.
:param TransferParameters asset1_transfer_spec: The parameters of the first asset being transferred.
It is also used for paying fees and/or receiving change if any.
:param bytes asset2_id: The ID of the second asset.
:param TransferDetails asset2_transfer_spec: The parameters of the second asset being transferred.
:param int fees: The fees to include in the transaction.
:return: The resulting unsigned transaction.
:rtype: CTransaction
"""
btc_transfer_spec = TransferParameters(
asset1_transfer_spec.unspent_outputs, asset1_transfer_spec.to_script, asset1_transfer_spec.change_script, 0)
return self.transfer(
[(asset1_id, asset1_transfer_spec), (asset2_id, asset2_transfer_spec)], btc_transfer_spec, fees)
@staticmethod
def _collect_uncolored_outputs(unspent_outputs, amount):
"""
Returns a list of uncolored outputs for the specified amount.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param int amount: The amount to collect.
:return: A list of outputs, and the total amount collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id is None:
result.append(output)
total_amount += output.output.value
if total_amount >= amount:
return result, total_amount
raise InsufficientFundsError
@staticmethod
def _collect_colored_outputs(unspent_outputs, asset_id, asset_quantity):
"""
Returns a list of colored outputs for the specified quantity.
:param list[SpendableOutput] unspent_outputs: The list of available outputs.
:param bytes asset_id: The ID of the asset to collect.
:param int asset_quantity: The asset quantity to collect.
:return: A list of outputs, and the total asset quantity collected.
:rtype: (list[SpendableOutput], int)
"""
total_amount = 0
result = []
for output in unspent_outputs:
if output.output.asset_id == asset_id:
result.append(output)
total_amount += output.output.asset_quantity
if total_amount >= asset_quantity:
return result, total_amount
raise InsufficientAssetQuantityError
def _get_uncolored_output(self, script, value):
"""
Creates an uncolored output.
:param bytes script: The output script.
:param int value: The satoshi value of the output.
:return: An object representing the uncolored output.
:rtype: TransactionOutput
"""
if value < self._dust_amount:
raise DustOutputError
return bitcoin.core.CTxOut(value, bitcoin.core.CScript(script))
def _get_colored_output(self, script):
"""
Creates a colored output.
:param bytes script: The output script.
:return: An object representing the colored output.
:rtype: TransactionOutput
"""
return bitcoin.core.CTxOut(self._dust_amount, bitcoin.core.CScript(script))
|
async def get_output(self, transaction_hash, output_index):
    """
    Gets an output and information about its asset ID and asset quantity.

    NOTE(review): modernized from a ``yield from`` generator-coroutine to
    ``async def`` — the ``@asyncio.coroutine`` decorator the old form relied
    on was removed in Python 3.11. The method remains awaitable as before.

    :param bytes transaction_hash: The hash of the transaction containing the output.
    :param int output_index: The index of the output.
    :return: An object containing the output as well as its asset ID and asset quantity.
    :rtype: TransactionOutput
    :raises ValueError: If the transaction cannot be retrieved.
    """
    # Return a previously computed result when available.
    cached_output = await self._cache.get(transaction_hash, output_index)
    if cached_output is not None:
        return cached_output

    transaction = await self._transaction_provider(transaction_hash)
    if transaction is None:
        raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))

    colored_outputs = await self.color_transaction(transaction)

    # Cache every output of the transaction, not just the requested one.
    for index, output in enumerate(colored_outputs):
        await self._cache.put(transaction_hash, index, output)

    return colored_outputs[output_index]
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput] | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L54-L78 | null | class ColoringEngine(object):
"""The backtracking engine used to find the asset ID and asset quantity of any output."""
def __init__(self, transaction_provider, cache, event_loop):
"""
Constructs an instance of the ColorEngine class.
:param bytes -> Future[CTransaction] transaction_provider: A function returning a transaction given its hash.
:param OutputCache cache: The cache object to use.
:param BaseEventLoop | None event_loop: The event loop used to schedule asynchronous tasks.
"""
self._transaction_provider = transaction_provider
self._cache = cache
self._loop = event_loop
@asyncio.coroutine
@asyncio.coroutine
def color_transaction(self, transaction):
"""
Computes the asset ID and asset quantity of every output in the transaction.
:param CTransaction transaction: The transaction to color.
:return: A list containing all the colored outputs of the transaction.
:rtype: Future[list[TransactionOutput]]
"""
# If the transaction is a coinbase transaction, the marker output is always invalid
if not transaction.is_coinbase():
for i, output in enumerate(transaction.vout):
# Parse the OP_RETURN script
marker_output_payload = MarkerOutput.parse_script(output.scriptPubKey)
if marker_output_payload is not None:
# Deserialize the payload as a marker output
marker_output = MarkerOutput.deserialize_payload(marker_output_payload)
if marker_output is not None:
# Fetch the colored outputs for previous transactions
inputs = []
for input in transaction.vin:
inputs.append((yield from asyncio.async(
self.get_output(input.prevout.hash, input.prevout.n), loop=self._loop)))
asset_ids = self._compute_asset_ids(
inputs,
i,
transaction.vout,
marker_output.asset_quantities)
if asset_ids is not None:
return asset_ids
# If no valid marker output was found in the transaction, all outputs are considered uncolored
return [
TransactionOutput(output.nValue, output.scriptPubKey, None, 0, OutputType.uncolored)
for output in transaction.vout]
@classmethod
def _compute_asset_ids(cls, inputs, marker_output_index, outputs, asset_quantities):
"""
Computes the asset IDs of every output in a transaction.
:param list[TransactionOutput] inputs: The outputs referenced by the inputs of the transaction.
:param int marker_output_index: The position of the marker output in the transaction.
:param list[CTxOut] outputs: The outputs of the transaction.
:param list[int] asset_quantities: The list of asset quantities of the outputs.
:return: A list of outputs with asset ID and asset quantity information.
:rtype: list[TransactionOutput]
"""
# If there are more items in the asset quantities list than outputs in the transaction (excluding the
# marker output), the marker output is deemed invalid
if len(asset_quantities) > len(outputs) - 1:
return None
# If there is no input in the transaction, the marker output is always invalid
if len(inputs) == 0:
return None
result = []
# Add the issuance outputs
issuance_asset_id = cls.hash_script(bytes(inputs[0].script))
for i in range(0, marker_output_index):
value, script = outputs[i].nValue, outputs[i].scriptPubKey
if i < len(asset_quantities) and asset_quantities[i] > 0:
output = TransactionOutput(value, script, issuance_asset_id, asset_quantities[i], OutputType.issuance)
else:
output = TransactionOutput(value, script, None, 0, OutputType.issuance)
result.append(output)
# Add the marker output
issuance_output = outputs[marker_output_index]
result.append(TransactionOutput(
issuance_output.nValue, issuance_output.scriptPubKey, None, 0, OutputType.marker_output))
# Add the transfer outputs
input_iterator = iter(inputs)
input_units_left = 0
for i in range(marker_output_index + 1, len(outputs)):
if i <= len(asset_quantities):
output_asset_quantity = asset_quantities[i - 1]
else:
output_asset_quantity = 0
output_units_left = output_asset_quantity
asset_id = None
while output_units_left > 0:
# Move to the next input if the current one is depleted
if input_units_left == 0:
current_input = next(input_iterator, None)
if current_input is None:
# There are less asset units available in the input than in the outputs:
# the marker output is considered invalid
return None
else:
input_units_left = current_input.asset_quantity
# If the current input is colored, assign its asset ID to the current output
if current_input.asset_id is not None:
progress = min(input_units_left, output_units_left)
output_units_left -= progress
input_units_left -= progress
if asset_id is None:
# This is the first input to map to this output
asset_id = current_input.asset_id
elif asset_id != current_input.asset_id:
# Another different asset ID has already been assigned to that output:
# the marker output is considered invalid
return None
result.append(TransactionOutput(
outputs[i].nValue, outputs[i].scriptPubKey, asset_id, output_asset_quantity, OutputType.transfer))
return result
@staticmethod
def hash_script(data):
"""
Hashes a script into an asset ID using SHA256 followed by RIPEMD160.
:param bytes data: The data to hash.
"""
sha256 = hashlib.sha256()
ripemd = hashlib.new('ripemd160')
sha256.update(data)
ripemd.update(sha256.digest())
return ripemd.digest()
|
async def color_transaction(self, transaction):
    """
    Computes the asset ID and asset quantity of every output in the transaction.

    NOTE(review): ``asyncio.async(...)`` is a syntax error on Python 3.7+
    (``async`` became a keyword) — replaced with ``asyncio.ensure_future``;
    the ``yield from`` coroutine was modernized to ``async def`` because
    ``@asyncio.coroutine`` was removed in Python 3.11.

    :param CTransaction transaction: The transaction to color.
    :return: A list containing all the colored outputs of the transaction.
    :rtype: list[TransactionOutput]
    """
    # If the transaction is a coinbase transaction, the marker output is always invalid
    if not transaction.is_coinbase():
        for i, output in enumerate(transaction.vout):
            # Parse the OP_RETURN script
            marker_output_payload = MarkerOutput.parse_script(output.scriptPubKey)

            if marker_output_payload is not None:
                # Deserialize the payload as a marker output
                marker_output = MarkerOutput.deserialize_payload(marker_output_payload)

                if marker_output is not None:
                    # Fetch the colored outputs for previous transactions
                    inputs = []
                    for tx_input in transaction.vin:
                        task = asyncio.ensure_future(
                            self.get_output(tx_input.prevout.hash, tx_input.prevout.n),
                            loop=self._loop)
                        inputs.append(await task)

                    asset_ids = self._compute_asset_ids(
                        inputs, i, transaction.vout, marker_output.asset_quantities)

                    if asset_ids is not None:
                        return asset_ids

    # If no valid marker output was found in the transaction, all outputs are
    # considered uncolored
    return [
        TransactionOutput(output.nValue, output.scriptPubKey, None, 0, OutputType.uncolored)
        for output in transaction.vout]
:param CTransaction transaction: The transaction to color.
:return: A list containing all the colored outputs of the transaction.
:rtype: Future[list[TransactionOutput]] | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L81-L118 | [
"def deserialize_payload(cls, payload):\n \"\"\"\n Deserializes the marker output payload.\n\n :param bytes payload: A buffer containing the marker output payload.\n :return: The marker output object.\n :rtype: MarkerOutput\n \"\"\"\n with io.BytesIO(payload) as stream:\n\n # The OAP marker and protocol version\n oa_version = stream.read(4)\n if oa_version != cls.OPEN_ASSETS_TAG:\n return None\n\n try:\n # Deserialize the expected number of items in the asset quantity list\n output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)\n\n # LEB128-encoded unsigned integers representing the asset quantity of every output in order\n asset_quantities = []\n for i in range(0, output_count):\n asset_quantity = cls.leb128_decode(stream)\n\n # If the LEB128-encoded asset quantity of any output exceeds 9 bytes,\n # the marker output is deemed invalid\n if asset_quantity > cls.MAX_ASSET_QUANTITY:\n return None\n\n asset_quantities.append(asset_quantity)\n\n # The var-integer encoded length of the metadata field.\n metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)\n\n # The actual metadata\n metadata = stream.read(metadata_length)\n\n # If the metadata string wasn't long enough, the marker output is malformed\n if len(metadata) != metadata_length:\n return None\n\n # If there are bytes left to read, the marker output is malformed\n last_byte = stream.read(1)\n if len(last_byte) > 0:\n return None\n\n except bitcoin.core.SerializationTruncationError:\n return None\n\n return MarkerOutput(asset_quantities, metadata)\n",
"def parse_script(output_script):\n \"\"\"\n Parses an output and returns the payload if the output matches the right pattern for a marker output,\n or None otherwise.\n\n :param CScript output_script: The output script to be parsed.\n :return: The marker output payload if the output fits the pattern, None otherwise.\n :rtype: bytes\n \"\"\"\n script_iterator = output_script.raw_iter()\n\n try:\n first_opcode, _, _ = next(script_iterator, (None, None, None))\n _, data, _ = next(script_iterator, (None, None, None))\n remainder = next(script_iterator, None)\n except bitcoin.core.script.CScriptTruncatedPushDataError:\n return None\n except bitcoin.core.script.CScriptInvalidError:\n return None\n\n if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:\n return data\n else:\n return None\n"
] | class ColoringEngine(object):
"""The backtracking engine used to find the asset ID and asset quantity of any output."""
def __init__(self, transaction_provider, cache, event_loop):
"""
Constructs an instance of the ColorEngine class.
:param bytes -> Future[CTransaction] transaction_provider: A function returning a transaction given its hash.
:param OutputCache cache: The cache object to use.
:param BaseEventLoop | None event_loop: The event loop used to schedule asynchronous tasks.
"""
self._transaction_provider = transaction_provider
self._cache = cache
self._loop = event_loop
@asyncio.coroutine
def get_output(self, transaction_hash, output_index):
"""
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
"""
cached_output = yield from self._cache.get(transaction_hash, output_index)
if cached_output is not None:
return cached_output
transaction = yield from self._transaction_provider(transaction_hash)
if transaction is None:
raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))
colored_outputs = yield from self.color_transaction(transaction)
for index, output in enumerate(colored_outputs):
yield from self._cache.put(transaction_hash, index, output)
return colored_outputs[output_index]
@asyncio.coroutine
@classmethod
def _compute_asset_ids(cls, inputs, marker_output_index, outputs, asset_quantities):
"""
Computes the asset IDs of every output in a transaction.
:param list[TransactionOutput] inputs: The outputs referenced by the inputs of the transaction.
:param int marker_output_index: The position of the marker output in the transaction.
:param list[CTxOut] outputs: The outputs of the transaction.
:param list[int] asset_quantities: The list of asset quantities of the outputs.
:return: A list of outputs with asset ID and asset quantity information.
:rtype: list[TransactionOutput]
"""
# If there are more items in the asset quantities list than outputs in the transaction (excluding the
# marker output), the marker output is deemed invalid
if len(asset_quantities) > len(outputs) - 1:
return None
# If there is no input in the transaction, the marker output is always invalid
if len(inputs) == 0:
return None
result = []
# Add the issuance outputs
issuance_asset_id = cls.hash_script(bytes(inputs[0].script))
for i in range(0, marker_output_index):
value, script = outputs[i].nValue, outputs[i].scriptPubKey
if i < len(asset_quantities) and asset_quantities[i] > 0:
output = TransactionOutput(value, script, issuance_asset_id, asset_quantities[i], OutputType.issuance)
else:
output = TransactionOutput(value, script, None, 0, OutputType.issuance)
result.append(output)
# Add the marker output
issuance_output = outputs[marker_output_index]
result.append(TransactionOutput(
issuance_output.nValue, issuance_output.scriptPubKey, None, 0, OutputType.marker_output))
# Add the transfer outputs
input_iterator = iter(inputs)
input_units_left = 0
for i in range(marker_output_index + 1, len(outputs)):
if i <= len(asset_quantities):
output_asset_quantity = asset_quantities[i - 1]
else:
output_asset_quantity = 0
output_units_left = output_asset_quantity
asset_id = None
while output_units_left > 0:
# Move to the next input if the current one is depleted
if input_units_left == 0:
current_input = next(input_iterator, None)
if current_input is None:
# There are less asset units available in the input than in the outputs:
# the marker output is considered invalid
return None
else:
input_units_left = current_input.asset_quantity
# If the current input is colored, assign its asset ID to the current output
if current_input.asset_id is not None:
progress = min(input_units_left, output_units_left)
output_units_left -= progress
input_units_left -= progress
if asset_id is None:
# This is the first input to map to this output
asset_id = current_input.asset_id
elif asset_id != current_input.asset_id:
# Another different asset ID has already been assigned to that output:
# the marker output is considered invalid
return None
result.append(TransactionOutput(
outputs[i].nValue, outputs[i].scriptPubKey, asset_id, output_asset_quantity, OutputType.transfer))
return result
@staticmethod
def hash_script(data):
"""
Hashes a script into an asset ID using SHA256 followed by RIPEMD160.
:param bytes data: The data to hash.
"""
sha256 = hashlib.sha256()
ripemd = hashlib.new('ripemd160')
sha256.update(data)
ripemd.update(sha256.digest())
return ripemd.digest()
|
OpenAssets/openassets | openassets/protocol.py | ColoringEngine._compute_asset_ids | python | def _compute_asset_ids(cls, inputs, marker_output_index, outputs, asset_quantities):
# If there are more items in the asset quantities list than outputs in the transaction (excluding the
# marker output), the marker output is deemed invalid
if len(asset_quantities) > len(outputs) - 1:
return None
# If there is no input in the transaction, the marker output is always invalid
if len(inputs) == 0:
return None
result = []
# Add the issuance outputs
issuance_asset_id = cls.hash_script(bytes(inputs[0].script))
for i in range(0, marker_output_index):
value, script = outputs[i].nValue, outputs[i].scriptPubKey
if i < len(asset_quantities) and asset_quantities[i] > 0:
output = TransactionOutput(value, script, issuance_asset_id, asset_quantities[i], OutputType.issuance)
else:
output = TransactionOutput(value, script, None, 0, OutputType.issuance)
result.append(output)
# Add the marker output
issuance_output = outputs[marker_output_index]
result.append(TransactionOutput(
issuance_output.nValue, issuance_output.scriptPubKey, None, 0, OutputType.marker_output))
# Add the transfer outputs
input_iterator = iter(inputs)
input_units_left = 0
for i in range(marker_output_index + 1, len(outputs)):
if i <= len(asset_quantities):
output_asset_quantity = asset_quantities[i - 1]
else:
output_asset_quantity = 0
output_units_left = output_asset_quantity
asset_id = None
while output_units_left > 0:
# Move to the next input if the current one is depleted
if input_units_left == 0:
current_input = next(input_iterator, None)
if current_input is None:
# There are less asset units available in the input than in the outputs:
# the marker output is considered invalid
return None
else:
input_units_left = current_input.asset_quantity
# If the current input is colored, assign its asset ID to the current output
if current_input.asset_id is not None:
progress = min(input_units_left, output_units_left)
output_units_left -= progress
input_units_left -= progress
if asset_id is None:
# This is the first input to map to this output
asset_id = current_input.asset_id
elif asset_id != current_input.asset_id:
# Another different asset ID has already been assigned to that output:
# the marker output is considered invalid
return None
result.append(TransactionOutput(
outputs[i].nValue, outputs[i].scriptPubKey, asset_id, output_asset_quantity, OutputType.transfer))
return result | Computes the asset IDs of every output in a transaction.
:param list[TransactionOutput] inputs: The outputs referenced by the inputs of the transaction.
:param int marker_output_index: The position of the marker output in the transaction.
:param list[CTxOut] outputs: The outputs of the transaction.
:param list[int] asset_quantities: The list of asset quantities of the outputs.
:return: A list of outputs with asset ID and asset quantity information.
:rtype: list[TransactionOutput] | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L121-L201 | [
"def hash_script(data):\n \"\"\"\n Hashes a script into an asset ID using SHA256 followed by RIPEMD160.\n\n :param bytes data: The data to hash.\n \"\"\"\n sha256 = hashlib.sha256()\n ripemd = hashlib.new('ripemd160')\n\n sha256.update(data)\n ripemd.update(sha256.digest())\n return ripemd.digest()\n"
] | class ColoringEngine(object):
"""The backtracking engine used to find the asset ID and asset quantity of any output."""
def __init__(self, transaction_provider, cache, event_loop):
"""
Constructs an instance of the ColorEngine class.
:param bytes -> Future[CTransaction] transaction_provider: A function returning a transaction given its hash.
:param OutputCache cache: The cache object to use.
:param BaseEventLoop | None event_loop: The event loop used to schedule asynchronous tasks.
"""
self._transaction_provider = transaction_provider
self._cache = cache
self._loop = event_loop
@asyncio.coroutine
def get_output(self, transaction_hash, output_index):
"""
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
"""
cached_output = yield from self._cache.get(transaction_hash, output_index)
if cached_output is not None:
return cached_output
transaction = yield from self._transaction_provider(transaction_hash)
if transaction is None:
raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))
colored_outputs = yield from self.color_transaction(transaction)
for index, output in enumerate(colored_outputs):
yield from self._cache.put(transaction_hash, index, output)
return colored_outputs[output_index]
@asyncio.coroutine
def color_transaction(self, transaction):
"""
Computes the asset ID and asset quantity of every output in the transaction.
:param CTransaction transaction: The transaction to color.
:return: A list containing all the colored outputs of the transaction.
:rtype: Future[list[TransactionOutput]]
"""
# If the transaction is a coinbase transaction, the marker output is always invalid
if not transaction.is_coinbase():
for i, output in enumerate(transaction.vout):
# Parse the OP_RETURN script
marker_output_payload = MarkerOutput.parse_script(output.scriptPubKey)
if marker_output_payload is not None:
# Deserialize the payload as a marker output
marker_output = MarkerOutput.deserialize_payload(marker_output_payload)
if marker_output is not None:
# Fetch the colored outputs for previous transactions
inputs = []
for input in transaction.vin:
inputs.append((yield from asyncio.async(
self.get_output(input.prevout.hash, input.prevout.n), loop=self._loop)))
asset_ids = self._compute_asset_ids(
inputs,
i,
transaction.vout,
marker_output.asset_quantities)
if asset_ids is not None:
return asset_ids
# If no valid marker output was found in the transaction, all outputs are considered uncolored
return [
TransactionOutput(output.nValue, output.scriptPubKey, None, 0, OutputType.uncolored)
for output in transaction.vout]
@classmethod
@staticmethod
def hash_script(data):
"""
Hashes a script into an asset ID using SHA256 followed by RIPEMD160.
:param bytes data: The data to hash.
"""
sha256 = hashlib.sha256()
ripemd = hashlib.new('ripemd160')
sha256.update(data)
ripemd.update(sha256.digest())
return ripemd.digest()
|
OpenAssets/openassets | openassets/protocol.py | ColoringEngine.hash_script | python | def hash_script(data):
sha256 = hashlib.sha256()
ripemd = hashlib.new('ripemd160')
sha256.update(data)
ripemd.update(sha256.digest())
return ripemd.digest() | Hashes a script into an asset ID using SHA256 followed by RIPEMD160.
:param bytes data: The data to hash. | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L204-L215 | null | class ColoringEngine(object):
"""The backtracking engine used to find the asset ID and asset quantity of any output."""
def __init__(self, transaction_provider, cache, event_loop):
"""
Constructs an instance of the ColorEngine class.
:param bytes -> Future[CTransaction] transaction_provider: A function returning a transaction given its hash.
:param OutputCache cache: The cache object to use.
:param BaseEventLoop | None event_loop: The event loop used to schedule asynchronous tasks.
"""
self._transaction_provider = transaction_provider
self._cache = cache
self._loop = event_loop
@asyncio.coroutine
def get_output(self, transaction_hash, output_index):
"""
Gets an output and information about its asset ID and asset quantity.
:param bytes transaction_hash: The hash of the transaction containing the output.
:param int output_index: The index of the output.
:return: An object containing the output as well as its asset ID and asset quantity.
:rtype: Future[TransactionOutput]
"""
cached_output = yield from self._cache.get(transaction_hash, output_index)
if cached_output is not None:
return cached_output
transaction = yield from self._transaction_provider(transaction_hash)
if transaction is None:
raise ValueError('Transaction {0} could not be retrieved'.format(bitcoin.core.b2lx(transaction_hash)))
colored_outputs = yield from self.color_transaction(transaction)
for index, output in enumerate(colored_outputs):
yield from self._cache.put(transaction_hash, index, output)
return colored_outputs[output_index]
@asyncio.coroutine
def color_transaction(self, transaction):
"""
Computes the asset ID and asset quantity of every output in the transaction.
:param CTransaction transaction: The transaction to color.
:return: A list containing all the colored outputs of the transaction.
:rtype: Future[list[TransactionOutput]]
"""
# If the transaction is a coinbase transaction, the marker output is always invalid
if not transaction.is_coinbase():
for i, output in enumerate(transaction.vout):
# Parse the OP_RETURN script
marker_output_payload = MarkerOutput.parse_script(output.scriptPubKey)
if marker_output_payload is not None:
# Deserialize the payload as a marker output
marker_output = MarkerOutput.deserialize_payload(marker_output_payload)
if marker_output is not None:
# Fetch the colored outputs for previous transactions
inputs = []
for input in transaction.vin:
inputs.append((yield from asyncio.async(
self.get_output(input.prevout.hash, input.prevout.n), loop=self._loop)))
asset_ids = self._compute_asset_ids(
inputs,
i,
transaction.vout,
marker_output.asset_quantities)
if asset_ids is not None:
return asset_ids
# If no valid marker output was found in the transaction, all outputs are considered uncolored
return [
TransactionOutput(output.nValue, output.scriptPubKey, None, 0, OutputType.uncolored)
for output in transaction.vout]
@classmethod
def _compute_asset_ids(cls, inputs, marker_output_index, outputs, asset_quantities):
"""
Computes the asset IDs of every output in a transaction.
:param list[TransactionOutput] inputs: The outputs referenced by the inputs of the transaction.
:param int marker_output_index: The position of the marker output in the transaction.
:param list[CTxOut] outputs: The outputs of the transaction.
:param list[int] asset_quantities: The list of asset quantities of the outputs.
:return: A list of outputs with asset ID and asset quantity information.
:rtype: list[TransactionOutput]
"""
# If there are more items in the asset quantities list than outputs in the transaction (excluding the
# marker output), the marker output is deemed invalid
if len(asset_quantities) > len(outputs) - 1:
return None
# If there is no input in the transaction, the marker output is always invalid
if len(inputs) == 0:
return None
result = []
# Add the issuance outputs
issuance_asset_id = cls.hash_script(bytes(inputs[0].script))
for i in range(0, marker_output_index):
value, script = outputs[i].nValue, outputs[i].scriptPubKey
if i < len(asset_quantities) and asset_quantities[i] > 0:
output = TransactionOutput(value, script, issuance_asset_id, asset_quantities[i], OutputType.issuance)
else:
output = TransactionOutput(value, script, None, 0, OutputType.issuance)
result.append(output)
# Add the marker output
issuance_output = outputs[marker_output_index]
result.append(TransactionOutput(
issuance_output.nValue, issuance_output.scriptPubKey, None, 0, OutputType.marker_output))
# Add the transfer outputs
input_iterator = iter(inputs)
input_units_left = 0
for i in range(marker_output_index + 1, len(outputs)):
if i <= len(asset_quantities):
output_asset_quantity = asset_quantities[i - 1]
else:
output_asset_quantity = 0
output_units_left = output_asset_quantity
asset_id = None
while output_units_left > 0:
# Move to the next input if the current one is depleted
if input_units_left == 0:
current_input = next(input_iterator, None)
if current_input is None:
# There are less asset units available in the input than in the outputs:
# the marker output is considered invalid
return None
else:
input_units_left = current_input.asset_quantity
# If the current input is colored, assign its asset ID to the current output
if current_input.asset_id is not None:
progress = min(input_units_left, output_units_left)
output_units_left -= progress
input_units_left -= progress
if asset_id is None:
# This is the first input to map to this output
asset_id = current_input.asset_id
elif asset_id != current_input.asset_id:
# Another different asset ID has already been assigned to that output:
# the marker output is considered invalid
return None
result.append(TransactionOutput(
outputs[i].nValue, outputs[i].scriptPubKey, asset_id, output_asset_quantity, OutputType.transfer))
return result
@staticmethod
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.deserialize_payload | python | def deserialize_payload(cls, payload):
with io.BytesIO(payload) as stream:
# The OAP marker and protocol version
oa_version = stream.read(4)
if oa_version != cls.OPEN_ASSETS_TAG:
return None
try:
# Deserialize the expected number of items in the asset quantity list
output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# LEB128-encoded unsigned integers representing the asset quantity of every output in order
asset_quantities = []
for i in range(0, output_count):
asset_quantity = cls.leb128_decode(stream)
# If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
# the marker output is deemed invalid
if asset_quantity > cls.MAX_ASSET_QUANTITY:
return None
asset_quantities.append(asset_quantity)
# The var-integer encoded length of the metadata field.
metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# The actual metadata
metadata = stream.read(metadata_length)
# If the metadata string wasn't long enough, the marker output is malformed
if len(metadata) != metadata_length:
return None
# If there are bytes left to read, the marker output is malformed
last_byte = stream.read(1)
if len(last_byte) > 0:
return None
except bitcoin.core.SerializationTruncationError:
return None
return MarkerOutput(asset_quantities, metadata) | Deserializes the marker output payload.
:param bytes payload: A buffer containing the marker output payload.
:return: The marker output object.
:rtype: MarkerOutput | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L372-L421 | null | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
def __init__(self, asset_quantities, metadata):
"""
Initializes a new instance of the MarkerOutput class.
:param list[int] asset_quantities: The list of asset quantities.
:param bytes metadata: The metadata in the marker output.
"""
self._asset_quantities = asset_quantities
self._metadata = metadata
@property
def asset_quantities(self):
"""
Gets the asset quantity list.
:return: The asset quantity list of the output.
:rtype: list[int]
"""
return self._asset_quantities
@property
def metadata(self):
"""
Gets the metadata contained in the marker output.
:return: The metadata contained in the marker output.
:rtype: bytes
"""
return self._metadata
@classmethod
def serialize_payload(self):
"""
Serializes the marker output data into a payload buffer.
:return: The serialized payload.
:rtype: bytes
"""
with io.BytesIO() as stream:
stream.write(self.OPEN_ASSETS_TAG)
bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
for asset_quantity in self.asset_quantities:
stream.write(self.leb128_encode(asset_quantity))
bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
stream.write(self.metadata)
return stream.getvalue()
@staticmethod
def parse_script(output_script):
"""
Parses an output and returns the payload if the output matches the right pattern for a marker output,
or None otherwise.
:param CScript output_script: The output script to be parsed.
:return: The marker output payload if the output fits the pattern, None otherwise.
:rtype: bytes
"""
script_iterator = output_script.raw_iter()
try:
first_opcode, _, _ = next(script_iterator, (None, None, None))
_, data, _ = next(script_iterator, (None, None, None))
remainder = next(script_iterator, None)
except bitcoin.core.script.CScriptTruncatedPushDataError:
return None
except bitcoin.core.script.CScriptInvalidError:
return None
if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
return data
else:
return None
@staticmethod
def build_script(data):
"""
Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript
"""
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))
@staticmethod
def leb128_decode(data):
"""
Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int
"""
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result
@staticmethod
def leb128_encode(value):
"""
Encodes an integer using LEB128.
:param int value: The value to encode.
:return: The LEB128-encoded integer.
:rtype: bytes
"""
if value == 0:
return b'\x00'
result = []
while value != 0:
byte = value & 0x7f
value >>= 7
if value != 0:
byte |= 0x80
result.append(byte)
return bytes(result)
def __repr__(self):
return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.serialize_payload | python | def serialize_payload(self):
with io.BytesIO() as stream:
stream.write(self.OPEN_ASSETS_TAG)
bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
for asset_quantity in self.asset_quantities:
stream.write(self.leb128_encode(asset_quantity))
bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
stream.write(self.metadata)
return stream.getvalue() | Serializes the marker output data into a payload buffer.
:return: The serialized payload.
:rtype: bytes | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L423-L441 | [
"def leb128_encode(value):\n \"\"\"\n Encodes an integer using LEB128.\n\n :param int value: The value to encode.\n :return: The LEB128-encoded integer.\n :rtype: bytes\n \"\"\"\n if value == 0:\n return b'\\x00'\n\n result = []\n while value != 0:\n byte = value & 0x7f\n value >>= 7\n if value != 0:\n byte |= 0x80\n result.append(byte)\n\n return bytes(result)\n"
] | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
def __init__(self, asset_quantities, metadata):
"""
Initializes a new instance of the MarkerOutput class.
:param list[int] asset_quantities: The list of asset quantities.
:param bytes metadata: The metadata in the marker output.
"""
self._asset_quantities = asset_quantities
self._metadata = metadata
@property
def asset_quantities(self):
"""
Gets the asset quantity list.
:return: The asset quantity list of the output.
:rtype: list[int]
"""
return self._asset_quantities
@property
def metadata(self):
"""
Gets the metadata contained in the marker output.
:return: The metadata contained in the marker output.
:rtype: bytes
"""
return self._metadata
@classmethod
def deserialize_payload(cls, payload):
"""
Deserializes the marker output payload.
:param bytes payload: A buffer containing the marker output payload.
:return: The marker output object.
:rtype: MarkerOutput
"""
with io.BytesIO(payload) as stream:
# The OAP marker and protocol version
oa_version = stream.read(4)
if oa_version != cls.OPEN_ASSETS_TAG:
return None
try:
# Deserialize the expected number of items in the asset quantity list
output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# LEB128-encoded unsigned integers representing the asset quantity of every output in order
asset_quantities = []
for i in range(0, output_count):
asset_quantity = cls.leb128_decode(stream)
# If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
# the marker output is deemed invalid
if asset_quantity > cls.MAX_ASSET_QUANTITY:
return None
asset_quantities.append(asset_quantity)
# The var-integer encoded length of the metadata field.
metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# The actual metadata
metadata = stream.read(metadata_length)
# If the metadata string wasn't long enough, the marker output is malformed
if len(metadata) != metadata_length:
return None
# If there are bytes left to read, the marker output is malformed
last_byte = stream.read(1)
if len(last_byte) > 0:
return None
except bitcoin.core.SerializationTruncationError:
return None
return MarkerOutput(asset_quantities, metadata)
@staticmethod
def parse_script(output_script):
"""
Parses an output and returns the payload if the output matches the right pattern for a marker output,
or None otherwise.
:param CScript output_script: The output script to be parsed.
:return: The marker output payload if the output fits the pattern, None otherwise.
:rtype: bytes
"""
script_iterator = output_script.raw_iter()
try:
first_opcode, _, _ = next(script_iterator, (None, None, None))
_, data, _ = next(script_iterator, (None, None, None))
remainder = next(script_iterator, None)
except bitcoin.core.script.CScriptTruncatedPushDataError:
return None
except bitcoin.core.script.CScriptInvalidError:
return None
if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
return data
else:
return None
@staticmethod
def build_script(data):
"""
Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript
"""
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))
@staticmethod
def leb128_decode(data):
"""
Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int
"""
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result
@staticmethod
def leb128_encode(value):
"""
Encodes an integer using LEB128.
:param int value: The value to encode.
:return: The LEB128-encoded integer.
:rtype: bytes
"""
if value == 0:
return b'\x00'
result = []
while value != 0:
byte = value & 0x7f
value >>= 7
if value != 0:
byte |= 0x80
result.append(byte)
return bytes(result)
def __repr__(self):
return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.parse_script | python | def parse_script(output_script):
script_iterator = output_script.raw_iter()
try:
first_opcode, _, _ = next(script_iterator, (None, None, None))
_, data, _ = next(script_iterator, (None, None, None))
remainder = next(script_iterator, None)
except bitcoin.core.script.CScriptTruncatedPushDataError:
return None
except bitcoin.core.script.CScriptInvalidError:
return None
if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
return data
else:
return None | Parses an output and returns the payload if the output matches the right pattern for a marker output,
or None otherwise.
:param CScript output_script: The output script to be parsed.
:return: The marker output payload if the output fits the pattern, None otherwise.
:rtype: bytes | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L444-L467 | null | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
def __init__(self, asset_quantities, metadata):
"""
Initializes a new instance of the MarkerOutput class.
:param list[int] asset_quantities: The list of asset quantities.
:param bytes metadata: The metadata in the marker output.
"""
self._asset_quantities = asset_quantities
self._metadata = metadata
@property
def asset_quantities(self):
"""
Gets the asset quantity list.
:return: The asset quantity list of the output.
:rtype: list[int]
"""
return self._asset_quantities
@property
def metadata(self):
"""
Gets the metadata contained in the marker output.
:return: The metadata contained in the marker output.
:rtype: bytes
"""
return self._metadata
@classmethod
def deserialize_payload(cls, payload):
"""
Deserializes the marker output payload.
:param bytes payload: A buffer containing the marker output payload.
:return: The marker output object.
:rtype: MarkerOutput
"""
with io.BytesIO(payload) as stream:
# The OAP marker and protocol version
oa_version = stream.read(4)
if oa_version != cls.OPEN_ASSETS_TAG:
return None
try:
# Deserialize the expected number of items in the asset quantity list
output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# LEB128-encoded unsigned integers representing the asset quantity of every output in order
asset_quantities = []
for i in range(0, output_count):
asset_quantity = cls.leb128_decode(stream)
# If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
# the marker output is deemed invalid
if asset_quantity > cls.MAX_ASSET_QUANTITY:
return None
asset_quantities.append(asset_quantity)
# The var-integer encoded length of the metadata field.
metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# The actual metadata
metadata = stream.read(metadata_length)
# If the metadata string wasn't long enough, the marker output is malformed
if len(metadata) != metadata_length:
return None
# If there are bytes left to read, the marker output is malformed
last_byte = stream.read(1)
if len(last_byte) > 0:
return None
except bitcoin.core.SerializationTruncationError:
return None
return MarkerOutput(asset_quantities, metadata)
def serialize_payload(self):
"""
Serializes the marker output data into a payload buffer.
:return: The serialized payload.
:rtype: bytes
"""
with io.BytesIO() as stream:
stream.write(self.OPEN_ASSETS_TAG)
bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
for asset_quantity in self.asset_quantities:
stream.write(self.leb128_encode(asset_quantity))
bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
stream.write(self.metadata)
return stream.getvalue()
@staticmethod
@staticmethod
def build_script(data):
"""
Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript
"""
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))
@staticmethod
def leb128_decode(data):
"""
Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int
"""
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result
@staticmethod
def leb128_encode(value):
"""
Encodes an integer using LEB128.
:param int value: The value to encode.
:return: The LEB128-encoded integer.
:rtype: bytes
"""
if value == 0:
return b'\x00'
result = []
while value != 0:
byte = value & 0x7f
value >>= 7
if value != 0:
byte |= 0x80
result.append(byte)
return bytes(result)
def __repr__(self):
return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.build_script | python | def build_script(data):
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data)) | Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L470-L479 | null | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
    def __init__(self, asset_quantities, metadata):
        """
        Initializes a new instance of the MarkerOutput class.
        :param list[int] asset_quantities: The list of asset quantities.
        :param bytes metadata: The metadata in the marker output.
        """
        # Stored privately; exposed read-only through the matching properties.
        self._asset_quantities = asset_quantities
        self._metadata = metadata
    @property
    def asset_quantities(self):
        """
        Gets the asset quantity list.
        :return: The asset quantity list of the output.
        :rtype: list[int]
        """
        # Read-only accessor for the value supplied at construction time.
        return self._asset_quantities
    @property
    def metadata(self):
        """
        Gets the metadata contained in the marker output.
        :return: The metadata contained in the marker output.
        :rtype: bytes
        """
        # Read-only accessor for the value supplied at construction time.
        return self._metadata
    @classmethod
    def deserialize_payload(cls, payload):
        """
        Deserializes the marker output payload.
        :param bytes payload: A buffer containing the marker output payload.
        :return: The marker output object, or None if the payload is not a
            well-formed Open Assets marker (wrong tag, oversized quantity,
            truncated input, short metadata, or trailing bytes).
        :rtype: MarkerOutput
        """
        with io.BytesIO(payload) as stream:
            # The OAP marker and protocol version
            oa_version = stream.read(4)
            if oa_version != cls.OPEN_ASSETS_TAG:
                return None
            try:
                # Deserialize the expected number of items in the asset quantity list
                output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
                # LEB128-encoded unsigned integers representing the asset quantity of every output in order
                asset_quantities = []
                for i in range(0, output_count):
                    asset_quantity = cls.leb128_decode(stream)
                    # If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
                    # the marker output is deemed invalid
                    if asset_quantity > cls.MAX_ASSET_QUANTITY:
                        return None
                    asset_quantities.append(asset_quantity)
                # The var-integer encoded length of the metadata field.
                metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
                # The actual metadata
                metadata = stream.read(metadata_length)
                # If the metadata string wasn't long enough, the marker output is malformed
                if len(metadata) != metadata_length:
                    return None
                # If there are bytes left to read, the marker output is malformed
                last_byte = stream.read(1)
                if len(last_byte) > 0:
                    return None
            except bitcoin.core.SerializationTruncationError:
                # Raised by leb128_decode / the var-int reader on truncated input.
                return None
            return MarkerOutput(asset_quantities, metadata)
    def serialize_payload(self):
        """
        Serializes the marker output data into a payload buffer.
        :return: The serialized payload.
        :rtype: bytes
        """
        with io.BytesIO() as stream:
            # Protocol tag identifying an Open Assets marker output.
            stream.write(self.OPEN_ASSETS_TAG)
            # Var-int count followed by one LEB128-encoded quantity per output.
            bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
            for asset_quantity in self.asset_quantities:
                stream.write(self.leb128_encode(asset_quantity))
            # Var-int length prefix followed by the raw metadata bytes.
            bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
            stream.write(self.metadata)
            return stream.getvalue()
    @staticmethod
    def parse_script(output_script):
        """
        Parses an output and returns the payload if the output matches the right pattern for a marker output,
        or None otherwise.
        :param CScript output_script: The output script to be parsed.
        :return: The marker output payload if the output fits the pattern, None otherwise.
        :rtype: bytes
        """
        script_iterator = output_script.raw_iter()
        try:
            # Expected layout: exactly one OP_RETURN, one PUSHDATA, nothing else.
            first_opcode, _, _ = next(script_iterator, (None, None, None))
            _, data, _ = next(script_iterator, (None, None, None))
            remainder = next(script_iterator, None)
        except bitcoin.core.script.CScriptTruncatedPushDataError:
            return None
        except bitcoin.core.script.CScriptInvalidError:
            return None
        if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
            return data
        else:
            return None
@staticmethod
@staticmethod
def leb128_decode(data):
"""
Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int
"""
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result
    @staticmethod
    def leb128_encode(value):
        """
        Encodes an integer using LEB128.
        :param int value: The value to encode.
        :return: The LEB128-encoded integer.
        :rtype: bytes
        """
        # Zero has no non-zero 7-bit groups; encode it explicitly.
        if value == 0:
            return b'\x00'
        result = []
        while value != 0:
            byte = value & 0x7f
            value >>= 7
            # Set the continuation bit on every byte except the last.
            if value != 0:
                byte |= 0x80
            result.append(byte)
        return bytes(result)
    def __repr__(self):
        """Return a constructor-style representation of this marker output."""
        return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.leb128_decode | python | def leb128_decode(data):
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result | Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L482-L503 | null | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
def __init__(self, asset_quantities, metadata):
"""
Initializes a new instance of the MarkerOutput class.
:param list[int] asset_quantities: The list of asset quantities.
:param bytes metadata: The metadata in the marker output.
"""
self._asset_quantities = asset_quantities
self._metadata = metadata
@property
def asset_quantities(self):
"""
Gets the asset quantity list.
:return: The asset quantity list of the output.
:rtype: list[int]
"""
return self._asset_quantities
@property
def metadata(self):
"""
Gets the metadata contained in the marker output.
:return: The metadata contained in the marker output.
:rtype: bytes
"""
return self._metadata
@classmethod
def deserialize_payload(cls, payload):
"""
Deserializes the marker output payload.
:param bytes payload: A buffer containing the marker output payload.
:return: The marker output object.
:rtype: MarkerOutput
"""
with io.BytesIO(payload) as stream:
# The OAP marker and protocol version
oa_version = stream.read(4)
if oa_version != cls.OPEN_ASSETS_TAG:
return None
try:
# Deserialize the expected number of items in the asset quantity list
output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# LEB128-encoded unsigned integers representing the asset quantity of every output in order
asset_quantities = []
for i in range(0, output_count):
asset_quantity = cls.leb128_decode(stream)
# If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
# the marker output is deemed invalid
if asset_quantity > cls.MAX_ASSET_QUANTITY:
return None
asset_quantities.append(asset_quantity)
# The var-integer encoded length of the metadata field.
metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# The actual metadata
metadata = stream.read(metadata_length)
# If the metadata string wasn't long enough, the marker output is malformed
if len(metadata) != metadata_length:
return None
# If there are bytes left to read, the marker output is malformed
last_byte = stream.read(1)
if len(last_byte) > 0:
return None
except bitcoin.core.SerializationTruncationError:
return None
return MarkerOutput(asset_quantities, metadata)
def serialize_payload(self):
"""
Serializes the marker output data into a payload buffer.
:return: The serialized payload.
:rtype: bytes
"""
with io.BytesIO() as stream:
stream.write(self.OPEN_ASSETS_TAG)
bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
for asset_quantity in self.asset_quantities:
stream.write(self.leb128_encode(asset_quantity))
bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
stream.write(self.metadata)
return stream.getvalue()
@staticmethod
def parse_script(output_script):
"""
Parses an output and returns the payload if the output matches the right pattern for a marker output,
or None otherwise.
:param CScript output_script: The output script to be parsed.
:return: The marker output payload if the output fits the pattern, None otherwise.
:rtype: bytes
"""
script_iterator = output_script.raw_iter()
try:
first_opcode, _, _ = next(script_iterator, (None, None, None))
_, data, _ = next(script_iterator, (None, None, None))
remainder = next(script_iterator, None)
except bitcoin.core.script.CScriptTruncatedPushDataError:
return None
except bitcoin.core.script.CScriptInvalidError:
return None
if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
return data
else:
return None
@staticmethod
def build_script(data):
"""
Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript
"""
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))
@staticmethod
@staticmethod
def leb128_encode(value):
"""
Encodes an integer using LEB128.
:param int value: The value to encode.
:return: The LEB128-encoded integer.
:rtype: bytes
"""
if value == 0:
return b'\x00'
result = []
while value != 0:
byte = value & 0x7f
value >>= 7
if value != 0:
byte |= 0x80
result.append(byte)
return bytes(result)
def __repr__(self):
return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
OpenAssets/openassets | openassets/protocol.py | MarkerOutput.leb128_encode | python | def leb128_encode(value):
if value == 0:
return b'\x00'
result = []
while value != 0:
byte = value & 0x7f
value >>= 7
if value != 0:
byte |= 0x80
result.append(byte)
return bytes(result) | Encodes an integer using LEB128.
:param int value: The value to encode.
:return: The LEB128-encoded integer.
:rtype: bytes | train | https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/protocol.py#L506-L525 | null | class MarkerOutput(object):
"""Represents an Open Assets marker output."""
MAX_ASSET_QUANTITY = 2 ** 63 - 1
OPEN_ASSETS_TAG = b'OA\x01\x00'
def __init__(self, asset_quantities, metadata):
"""
Initializes a new instance of the MarkerOutput class.
:param list[int] asset_quantities: The list of asset quantities.
:param bytes metadata: The metadata in the marker output.
"""
self._asset_quantities = asset_quantities
self._metadata = metadata
@property
def asset_quantities(self):
"""
Gets the asset quantity list.
:return: The asset quantity list of the output.
:rtype: list[int]
"""
return self._asset_quantities
@property
def metadata(self):
"""
Gets the metadata contained in the marker output.
:return: The metadata contained in the marker output.
:rtype: bytes
"""
return self._metadata
@classmethod
def deserialize_payload(cls, payload):
"""
Deserializes the marker output payload.
:param bytes payload: A buffer containing the marker output payload.
:return: The marker output object.
:rtype: MarkerOutput
"""
with io.BytesIO(payload) as stream:
# The OAP marker and protocol version
oa_version = stream.read(4)
if oa_version != cls.OPEN_ASSETS_TAG:
return None
try:
# Deserialize the expected number of items in the asset quantity list
output_count = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# LEB128-encoded unsigned integers representing the asset quantity of every output in order
asset_quantities = []
for i in range(0, output_count):
asset_quantity = cls.leb128_decode(stream)
# If the LEB128-encoded asset quantity of any output exceeds 9 bytes,
# the marker output is deemed invalid
if asset_quantity > cls.MAX_ASSET_QUANTITY:
return None
asset_quantities.append(asset_quantity)
# The var-integer encoded length of the metadata field.
metadata_length = bitcoin.core.VarIntSerializer.stream_deserialize(stream)
# The actual metadata
metadata = stream.read(metadata_length)
# If the metadata string wasn't long enough, the marker output is malformed
if len(metadata) != metadata_length:
return None
# If there are bytes left to read, the marker output is malformed
last_byte = stream.read(1)
if len(last_byte) > 0:
return None
except bitcoin.core.SerializationTruncationError:
return None
return MarkerOutput(asset_quantities, metadata)
def serialize_payload(self):
"""
Serializes the marker output data into a payload buffer.
:return: The serialized payload.
:rtype: bytes
"""
with io.BytesIO() as stream:
stream.write(self.OPEN_ASSETS_TAG)
bitcoin.core.VarIntSerializer.stream_serialize(len(self.asset_quantities), stream)
for asset_quantity in self.asset_quantities:
stream.write(self.leb128_encode(asset_quantity))
bitcoin.core.VarIntSerializer.stream_serialize(len(self.metadata), stream)
stream.write(self.metadata)
return stream.getvalue()
@staticmethod
def parse_script(output_script):
"""
Parses an output and returns the payload if the output matches the right pattern for a marker output,
or None otherwise.
:param CScript output_script: The output script to be parsed.
:return: The marker output payload if the output fits the pattern, None otherwise.
:rtype: bytes
"""
script_iterator = output_script.raw_iter()
try:
first_opcode, _, _ = next(script_iterator, (None, None, None))
_, data, _ = next(script_iterator, (None, None, None))
remainder = next(script_iterator, None)
except bitcoin.core.script.CScriptTruncatedPushDataError:
return None
except bitcoin.core.script.CScriptInvalidError:
return None
if first_opcode == bitcoin.core.script.OP_RETURN and data is not None and remainder is None:
return data
else:
return None
@staticmethod
def build_script(data):
"""
Creates an output script containing an OP_RETURN and a PUSHDATA.
:param bytes data: The content of the PUSHDATA.
:return: The final script.
:rtype: CScript
"""
return bitcoin.core.script.CScript(
bytes([bitcoin.core.script.OP_RETURN]) + bitcoin.core.script.CScriptOp.encode_op_pushdata(data))
@staticmethod
def leb128_decode(data):
"""
Decodes a LEB128-encoded unsigned integer.
:param BufferedIOBase data: The buffer containing the LEB128-encoded integer to decode.
:return: The decoded integer.
:rtype: int
"""
result = 0
shift = 0
while True:
character = data.read(1)
if len(character) == 0:
raise bitcoin.core.SerializationTruncationError('Invalid LEB128 integer')
b = ord(character)
result |= (b & 0x7f) << shift
if b & 0x80 == 0:
break
shift += 7
return result
@staticmethod
def __repr__(self):
return 'MarkerOutput(asset_quantities=%r, metadata=%r)' % (self.asset_quantities, self.metadata)
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | memory | python | def memory():
mem_info = dict()
for k, v in psutil.virtual_memory()._asdict().items():
mem_info[k] = int(v)
return mem_info | Determine memory specifications of the machine.
Returns
-------
mem_info : dictonary
Holds the current values for the total, free and used memory of the system. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L69-L83 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
# Silence invalid floating-point operations (e.g. 0/0) instead of warning.
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
# Names exported by 'from Cluster_Ensembles import *'.
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
           'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """
    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(amount) for field, amount in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    # (free-memory threshold, safety margin) pairs, checked in decreasing
    # order; the first threshold exceeded determines the margin subtracted
    # before converting the remainder into a row/column count. This replaces
    # the original copy-pasted if/elif ladder with identical arithmetic.
    brackets = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )
    mem_free = memory()['free']
    for threshold, margin in brackets:
        if mem_free > threshold:
            # '4' is the byte size of a float32 entry; the '* 1000' scaling
            # mirrors the original code — presumably treating the psutil
            # byte counts as KB. TODO(review): confirm the intended units.
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array about to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small enough relative to free memory for
        compression to be unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: 'tables.Filters' is the PyTables filter-properties
            # class; the original code called the 'tables.filters' module,
            # which raised TypeError at runtime.
            FILTERS = tables.Filters(complevel=5, complib='blosc',
                                     shuffle=True, least_significant_digit=6)
        except tables.FiltersWarning:
            # Fall back to LZO. NOTE(review): FiltersWarning is a Warning
            # subclass and only surfaces as an exception under an 'error'
            # warnings filter — confirm this fallback path is reachable.
            FILTERS = tables.Filters(complevel=5, complib='lzo',
                                     shuffle=True, least_significant_digit=6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    # Stack all membership matrices in a single vstack call: the original
    # pairwise-incremental vstack recopied the accumulated matrix at every
    # step, which is quadratic in the number of partitions.
    return scipy.sparse.vstack(
        [create_membership_matrix(cluster_run) for cluster_run in cluster_runs],
        format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.
    Stores the four CSR components ('data', 'indices', 'indptr', 'shape')
    under the file's '/consensus_group' node, overwriting any stale copies.
    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    # Only CSR matrices decompose into the data/indices/indptr/shape quartet.
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any previously-stored copy of this component first.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass
            # Store each component as a (possibly compressed) chunked array.
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                           array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Read back a hypergraph adjacency matrix previously written by
    'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, attribute).read()
                      for attribute in ('data', 'indices', 'indptr', 'shape')]
    data, indices, indptr, shape = components
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.
    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.
    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").
    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    # Create (or overwrite) the HDF5 scratch file holding the intermediate
    # arrays shared by the consensus functions.
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # Skip CSPA for large data-sets: it works on an n_samples x n_samples
    # co-association structure that becomes too expensive beyond ~10000 cells.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # The hypergraph is computed once and shared through the HDF5 file.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    # Run every candidate consensus function and score each result by its
    # weighted average mutual information with the ensemble's partitions.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the consensus labelling with the best score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0
    # A flat vector denotes a single partition; promote it to a 1 x n array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    weighted_average_mutual_information = 0
    N_labelled_indices = 0
    for i in range(cluster_runs.shape[0]):
        # Samples not clustered in this run are tagged NaN; keep the rest.
        labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
        N = labelled_indices.size
        # Normalize both labellings to dense 0..k-1 vectors before scoring.
        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
        q = normalized_mutual_info_score(x, y)
        # Weight each run's score by how many samples it actually labelled.
        weighted_average_mutual_information += q * N
        N_labelled_indices += N
    return float(weighted_average_mutual_information) / N_labelled_indices
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    Raises
    ------
    ValueError
        If the vector is empty, not effectively one-dimensional, or
        contains at least one NaN entry.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # A labelling is effectively one-dimensional when the product of its
    # shape entries equals its largest shape entry.
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # BUG FIX: the original message misspelled the function as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        # BUG FIX: the original message misspelled the function as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift negative labels so the minimum label becomes zero.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Re-map to a dense 0..k-1 labelling when there are gaps in the IDs.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Each label is replaced by its rank among the sorted distinct labels,
    so the output values cover the contiguous range 0..(k-1) for k
    distinct input labels.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    flattened = np.asanyarray(array_in).reshape(-1)
    # np.unique's inverse indices are exactly the dense ranks of each entry
    # among the sorted distinct values, replacing the manual sort-and-scan
    # loop of the original implementation.
    _, dense_labels = np.unique(flattened, return_inverse=True)
    return dense_labels.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.
    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.
    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.
    Notes
    -----
    NOTE(review): some repairs below rebind the local name 'similarities'
    (real-part truncation, square cropping, symmetrization) and therefore do
    NOT propagate to the caller, while the clipping and diagonal fixes mutate
    the input array in place. As nothing is returned, callers only observe
    the in-place modifications -- confirm this asymmetry is intended.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Discard imaginary components, if any (rebinds the local only).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Crop a non-square input to its largest top-left square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clip entries to the valid similarity range [0, 1] (in place).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (rebinds the local only).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store used as scratch space for the dense similarity matrix.
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest label found in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    # 'is None' is the correct identity test (the original used '== None').
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes an N_samples x N_samples dense matrix; bail out early
    # when that would be prohibitively large.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # s[i, j] counts in how many partitions samples i and j share a cluster.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate the co-association matrix normalized to [0, 1].
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the summed edge weights hit a fixed budget, as METIS
    # operates on integer weights.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round to integers chunk-wise to bound the memory footprint.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the hypergraph adjacency matrix.
    cluster_runs: array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest label found in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # 'is None' is the correct identity test (the original used '== None').
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    # All the heavy lifting (writing the hypergraph file and calling shmetis)
    # is delegated to 'hmetis'.
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix; also used as
        scratch space for the large intermediate matrices built below.
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest label found in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    # 'is None' is the correct identity test (the original used '== None').
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: the size of each cluster across the ensemble.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                               'similarities_MCLA', tables.Float32Atom(),
                               (N_rows, N_rows), "Matrix of pairwise Jaccard "
                               "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        # For binary rows u, v: Jaccard(u, v) = |u & v| / (|u| + |v| - |u & v|).
        # Computed block-wise to bound memory, then scaled and rounded so the
        # graph partitioner can consume integer weights.
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            # Free the large per-chunk temporaries right away.
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    # Average the hyper-edges of each meta-cluster into one row of mean
    # memberships ('clb_cum'), in memory-bounded chunks.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    # Samples associated with no meta-cluster at all get random associations,
    # so that the argmax-based competition below is well-defined for them too.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    # Add a small random perturbation to 'clb_cum' to break exact ties.
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    # Column sums ('sum_diag') are used to normalize 'clb_cum' so that its
    # entries can be read as posterior probabilities of membership.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    # Competition: each sample goes to the meta-cluster holding its maximum
    # normalized membership.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # (fixed a stray leading quote that was embedded in this message)
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A single vector of cluster labels (any singleton dimensions are
        tolerated). Non-finite entries (NaN, +/-inf) are treated as
        "unassigned" and produce no row of the indicator matrix.
    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_distinct_finite_labels, n_samples); entry (c, j) is 1 when sample j
    carries the c-th distinct label (in increasing label order).
    """
    cluster_run = np.asanyarray(cluster_run)
    # Accept only vectors (possibly carrying singleton dimensions).
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Only finite labels give rise to rows of the indicator matrix.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Build the CSR structure row by row: the sample positions of each
    # cluster become the column indices of the corresponding row.
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for elt in cluster_ids:
        indices = np.append(indices, np.where(cluster_run == elt)[0])
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.
    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the CSPA similarity matrix.
    N_clusters_max : int
        Number of parts into which the graph is to be partitioned.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Remove the intermediate graph file via the standard library instead of
    # shelling out to 'rm' (portable, no subprocess spawn). 'wgraph' returns
    # the sentinel 'DO_NOT_PROCESS' when the graph has no edge, in which case
    # there is no file to delete.
    if file_name != 'DO_NOT_PROCESS':
        os.remove(file_name)
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix.
    N_clusters_max : int
        Number of parts into which the hypergraph is to be partitioned.
    w : array, optional (default = None)
        Optional hyper-edge weights; when present, the weighted graph
        format (method 3 of 'wgraph') is used.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    if w is None:
        file_name = wgraph(hdf5_file_name, None, 2)
    else:
        file_name = wgraph(hdf5_file_name, w, 3)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Remove the intermediate graph file via the standard library instead of
    # shelling out to 'rm' (portable, no subprocess spawn).
    os.remove(file_name)
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the MCLA similarity matrix.
    N_clusters_max : int
        Number of parts into which the graph is to be partitioned.
    w : array, optional (default = None)
        Vertex weights forwarded to 'wgraph' (method 1).
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    file_name = wgraph(hdf5_file_name, w, 1)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Remove the intermediate graph file via the standard library instead of
    # shelling out to 'rm' (portable, no subprocess spawn).
    os.remove(file_name)
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the similarity or hypergraph adjacency matrices.
    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).
    method : int, optional (default = 0)
        0 -> CSPA similarity graph; 1 -> MCLA similarity graph with weighted
        vertices; 2 and 3 -> HGPA hypergraph (3 with hyper-edge weights).
    Returns
    -------
    file_name : string
        Name of the graph file written in the current working directory, or
        the sentinel 'DO_NOT_PROCESS' when the method-0 graph has no edge.
    """
    print('\n#')
    # Select the matrix to serialize, depending on the consensus method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    # Self-similarities must not produce self-loops in the graph.
    if method in {0, 1}:
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS consumes integer weights; scale then round.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line: vertex count, edge count and a METIS/hMETIS format flag.
        if method == 0:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): this early return leaves 'fileh' open;
                # consider closing it before returning the sentinel.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count non-zero entries chunk-wise to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: optional vertex weight followed by
            # interlaced (neighbour, edge weight) pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One line per hyper-edge: its weight followed by its vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    # The zeroed similarity matrices are no longer needed once serialized.
    if method in {0, 1}:
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.
    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the partitioner.
    file_name : string
        Name of the graph file written by 'wgraph', or the sentinel
        'DO_NOT_PROCESS' when there was no graph to partition.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # Both partitioners write their result to '<graph file>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries only exist for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # NOTE(review): 'shell = True' with a string command; the arguments are
        # internal file names, but an argument list with shell = False would be safer.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # The output file holds one integer part-ID per line; 'np.loadtxt' opens
    # it itself (the previous unused 'open' handle has been dropped).
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    # Remove the partitioner's output file via the standard library instead
    # of shelling out to 'rm' (portable, no subprocess spawn).
    os.remove(out_name)
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)
    Returns
    -------
    cluster_dims_list :
    mutual_info_list :
    consensus_adjacency :
    """
    # A single label vector is promoted to a one-row ensemble.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build, in CSR form, the membership matrix of the consensus clustering:
    # row k lists the samples assigned consensus label k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Only finite, non-negative labels count as assigned samples.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' accumulates the samples of ALL clusters
            # processed so far, so for c > 0 this union is taken against the
            # cumulative index set rather than only the current cluster's
            # samples -- verify this is the intended overlap definition.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Pairwise intersection sizes between this run's clusters and the
        # consensus clusters, obtained via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | get_chunk_size | python | def get_chunk_size(N, n):
mem_free = memory()['free']
if mem_free > 60000000:
chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 40000000:
chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 14000000:
chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 8000000:
chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 2000000:
chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))
return chunk_size
elif mem_free > 1000000:
chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))
return chunk_size
else:
print("\nERROR: Cluster_Ensembles: get_chunk_size: "
"this machine does not have enough free memory resources "
"to perform ensemble clustering.\n")
sys.exit(1) | Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L86-L127 | [
"def memory():\n \"\"\"Determine memory specifications of the machine.\n\n Returns\n -------\n mem_info : dictonary\n Holds the current values for the total, free and used memory of the system.\n \"\"\"\n\n mem_info = dict()\n\n for k, v in psutil.virtual_memory()._asdict().items():\n mem_info[k] = int(v)\n\n return mem_info\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import operator
import os
import subprocess
import sys
import warnings
from functools import reduce

import numpy as np
import pkg_resources
import psutil
import scipy.sparse
import six
from six.moves import range
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import tables
# Silence NumPy's invalid-value warnings (e.g. comparisons involving NaN
# labels, which occur routinely in the label-checking routines below).
np.seterr(invalid = 'ignore')
# Dependencies of this module emit DeprecationWarnings that would clutter output.
warnings.filterwarnings('ignore', category = DeprecationWarning)
# Public API of this module.
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
           'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system,
        as reported by 'psutil.virtual_memory', with each field cast to 'int'.
    """
    return {key: int(value)
            for key, value in psutil.virtual_memory()._asdict().items()}
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.
    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array about to be stored.
    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to the available memory and
        compression is therefore deemed unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # NOTE(review): the factor of 1000 suggests 'free' is interpreted in
    # kilobytes here, while psutil reports bytes -- confirm the intended units.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # 'tables.Filters' (capital F) is the PyTables class; the previous
            # 'tables.filters' referred to a module and was not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if the Blosc compressor is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    # Stack all membership matrices in a single call: repeatedly vstacking
    # pairwise, as was done before, is quadratic in the number of partitions.
    return scipy.sparse.vstack(
        [create_membership_matrix(cluster_run) for cluster_run in cluster_runs],
        format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.
    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    # 'isinstance' instead of comparing '__class__', and the public
    # 'scipy.sparse.csr_matrix' name rather than the private (and deprecated)
    # 'scipy.sparse.csr.csr_matrix' module path.
    assert isinstance(hypergraph_adjacency, scipy.sparse.csr_matrix)
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        # Store each CSR component ('data', 'indices', 'indptr', 'shape')
        # as its own compressed array.
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale node left over from a previous run.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble the hypergraph adjacency matrix stored on disk.

    Reads back the four CSR components ('data', 'indices', 'indptr',
    'shape') written by 'store_hypergraph_adjacency' and rebuilds the
    sparse matrix from them.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        components = [getattr(group, name).read()
                      for name in ('data', 'indices', 'indptr', 'shape')]
    return scipy.sparse.csr_matrix(tuple(components[:3]), shape = components[3])
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.
    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.
    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").
    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # (Re-)create the HDF5 store and the group under which the consensus
    # functions keep their out-of-core scratch arrays.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA builds a dense (n_samples x n_samples) similarity matrix, so it
    # is excluded for large data-sets (see the size guard inside CSPA too).
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # The hypergraph adjacency matrix is shared by all consensus functions;
    # build it once and persist it to the HDF5 store.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its weighted average mutual
        # information with the individual runs of the ensemble.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the candidate consensus clustering with the highest score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering per row; samples skipped in a run are NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus labels for the whole data-set.

    verbose : Boolean, optional (default = False)
        Forwarded to 'checkcl' to control status messages.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the runs of the ensemble.
    """
    if cluster_ensemble is None:
        return 0.0
    # Promote a flat label vector to a single-row matrix of runs.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    total_weighted_info = 0
    total_labelled = 0
    for run in cluster_runs:
        # Only samples actually clustered in this run take part in the score.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size
        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose),
                              newshape = n_labelled)
        mutual_info = normalized_mutual_info_score(consensus_part, run_part)
        total_weighted_info += mutual_info * n_labelled
        total_labelled += n_labelled
    return float(total_weighted_info) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    Raises
    ------
    ValueError
        If the vector is empty, is not one-dimensional (up to singleton
        dimensions), or contains NaN entries.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # Product of dimensions equals the largest dimension only for vectors
    # (possibly padded with singleton axes).
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            # NB: in-place subtraction mutates the caller's array when an
            # ndarray was passed in.
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Compare against the dense relabelling; equal maxima means the
        # labels were already a gap-free 0..K-1 mapping.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        each entry is replaced by the rank of its value among the sorted
        distinct values, i.e. labels are remapped onto 0..K-1 without gaps.
    """
    x = np.asanyarray(array_in)
    flat = x.reshape(x.size)
    # 'return_inverse' yields, for each entry, its index into the sorted
    # array of unique values -- exactly the dense 0..K-1 relabelling that
    # the previous hand-rolled sort-and-scan loop computed.
    _, inverse = np.unique(flat, return_inverse = True)
    return inverse.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Returns
    -------
    similarities : array of shape (n_samples, n_samples)
        The validated and possibly corrected similarity matrix.
        Bug fix: corrections applied by re-binding the local name
        (truncation to real components, reduction to the largest square
        sub-matrix, symmetrization) used to be silently discarded because
        this function returned None; they are now propagated through the
        return value. Clipping to [0, 1] and fixing the diagonal are still
        performed in place on the current array.

    Raises
    ------
    ValueError
        If the matrix is empty, or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            # Clip out-of-range similarities into [0, 1], in place.
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            # Symmetrize by averaging with the transpose (re-binds the local).
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
        # Bug fix: return the corrected matrix so corrections made through
        # re-binding are not lost to the caller.
        return similarities
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes a dense (N_samples x N_samples) co-association
    # matrix, so it is only tractable for moderately sized data-sets.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # s[i, j] counts in how many runs samples i and j share a cluster
    # (co-association counts).
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Sanity-check the co-association frequencies as a similarity matrix.
    checks(np.divide(s, float(N_runs)), verbose)
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        # Rescale out-of-core with a PyTables expression, then round the
        # entries chunk-wise: METIS expects integer edge weights.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Delegates the whole consensus computation to 'hmetis', which writes
    the hypergraph to disk and invokes the external hMETIS partitioner.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one
        more than the largest label observed in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    if N_clusters_max is None:
        N_clusters_max = 1 + int(np.nanmax(cluster_runs))
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: row sums count the samples in each cluster
    # (one row of the adjacency matrix per cluster, across all runs).
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            # Jaccard(A, B) = |A & B| / (|A| + |B| - |A & B|), chunk-wise.
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            # Mean membership of all hyper-edges assigned to meta-cluster i + j.
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Objects with no association at all get random memberships so that
        # they can still be assigned to some meta-cluster below.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    # Add a tiny random perturbation as a tie-breaker before the argmax
    # competition; evaluated out-of-core through PyTables expressions.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    # Normalize each column so per-object memberships sum to one.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    # Assign each object to the meta-cluster holding its maximal membership.
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary membership indicator matrix for one label vector.

    Row k of the result is the indicator vector of cluster 'cluster_ids[k]':
    entry (k, i) is 1 exactly when sample i carries that cluster ID.
    Stacking such matrices yields the adjacency matrix of the hypergraph
    representation of an ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)

    Returns
    -------
    A membership indicator matrix in compressed sparse row form.
    """
    cluster_run = np.asanyarray(cluster_run)
    # A vector (up to singleton axes) has its size equal to its largest
    # dimension; anything else is rejected.
    if cluster_run.size != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    flattened = cluster_run.reshape(cluster_run.size)
    # NaN-tagged samples (not clustered in this run) belong to no row.
    cluster_ids = np.unique(np.compress(np.isfinite(flattened), flattened))
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for cluster_id in cluster_ids:
        indices = np.append(indices, np.where(flattened == cluster_id)[0])
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, flattened.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Writes the CSPA similarity graph to disk, hands it to the external
    METIS partitioner via 'sgraph', then deletes the intermediate file.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    cluster_labels = sgraph(N_clusters_max, graph_file)
    # The graph file is a scratch artifact; remove it once partitioned.
    subprocess.call(['rm', graph_file])
    return cluster_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights; when omitted the hypergraph is
        written unweighted.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # 'wgraph' method code 2 writes an unweighted hypergraph, 3 a weighted one.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    partition = sgraph(N_clusters_max, graph_file)
    partition = one_to_max(partition)
    subprocess.call(['rm', graph_file])
    return partition
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Vertex weights for the MCLA meta-graph.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 selects the weighted MCLA meta-graph format.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return partition
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
        Weights used by methods 1 and 3.
    method : int, optional (default = 0)
        0 -- CSPA similarity graph; 1 -- MCLA weighted meta-graph;
        2 or 3 -- HGPA hypergraph (3 with explicit hyper-edge weights).
    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory
        ('DO_NOT_PROCESS' when the CSPA graph has no edges at all).
    Raises
    ------
    ValueError
        If 'method' is not one of 0, 1, 2 or 3.
    """
    print('\n#')
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # The graph formats ignore self-similarities: zero out the diagonal.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS expects integer vertex weights; rescale and round them.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # Header: vertex count, edge count, '1' flags edge weights.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count non-zero entries chunk-wise to bound memory usage;
            # each undirected edge is stored twice, hence the halving.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            # '11' flags both vertex and edge weights in the METIS format.
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: number of hyper-edges, number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        # One line per vertex: alternating neighbour / weight.
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        # Same, preceded by this vertex's own weight.
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    # One line per hyper-edge: its weight then its vertices.
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix has served its purpose; free the HDF5 node.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.
    Parameters
    ----------
    N_clusters_max : int
    file_name : string
        One of the graph files written by 'wgraph' ('wgraph_CSPA',
        'wgraph_MCLA' or 'wgraph_HGPA'), or the 'DO_NOT_PROCESS'
        sentinel produced when the CSPA graph had no edges.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph files.
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The external partitioners write their result to '<graph>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries are platform-specific.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is the UBfactor (allowed imbalance) given to shmetis.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        # The output file holds one cluster ID per line, one line per vertex.
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file expected to hold a '/consensus_group' node; an extendable
        array named 'overlap_matrix' is created under it as a side effect.

    consensus_labels : array of shape (n_samples,)
        Consensus cluster label of each sample; assumed to be the dense
        integers 0 .. n_consensus_clusters - 1 (the loop below indexes
        clusters with range(N_consensus_labels)).

    cluster_runs : array of shape (n_partitions, n_samples)
        One row of cluster IDs per partition of the ensemble; non-finite
        entries mark samples left out of a given run.

    Returns
    -------
    cluster_dims_list : list of int
        Starts with 0, then the number of distinct finite, non-negative
        cluster IDs found in each run.

    mutual_info_list : list of float
        Mutual information between each run and 'consensus_labels', as
        computed by 'ceEvalMutual'.

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of shape (n_consensus_clusters, n_samples)
        for the consensus labelling.
    """

    # Promote a single labelling vector to a 1 x n_samples matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build, in raw CSR form, the membership matrix of the consensus
    # labelling: row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Only samples with a finite, non-negative label take part in the
        # overlap computation for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        # NOTE(review): 'indices' accumulates the members of every cluster ID
        # processed so far in this run, so the union sizes computed for row
        # 'c' involve the cumulative index set rather than only the members
        # of cluster 'elt' -- confirm this is intended.
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Intersection sizes between each of this run's clusters and each
        # consensus cluster, obtained as a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        # Append the |A & B| / |A | B| overlap rows for this run.
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | get_compression_filter | python | def get_compression_filter(byte_counts):
assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
if 2 * byte_counts > 1000 * memory()['free']:
try:
FILTERS = tables.filters(complevel = 5, complib = 'blosc',
shuffle = True, least_significant_digit = 6)
except tables.FiltersWarning:
FILTERS = tables.filters(complevel = 5, complib = 'lzo',
shuffle = True, least_significant_digit = 6)
else:
FILTERS = None
return FILTERS | Determine whether or not to use a compression on the array stored in
a hierarchical data format, and which compression library to use to that purpose.
Compression reduces the HDF5 file size and also helps improving I/O efficiency
for large datasets.
Parameters
----------
byte_counts : int
Returns
-------
FILTERS : instance of the tables.Filters class | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L130-L157 | [
"def memory():\n \"\"\"Determine memory specifications of the machine.\n\n Returns\n -------\n mem_info : dictonary\n Holds the current values for the total, free and used memory of the system.\n \"\"\"\n\n mem_info = dict()\n\n for k, v in psutil.virtual_memory()._asdict().items():\n mem_info[k] = int(v)\n\n return mem_info\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Maps every field reported by ``psutil.virtual_memory()``
        (e.g. 'total', 'free', 'used') to its value as an integer.
    """

    return {field: int(amount)
            for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        When too little memory is free, an error message is printed
        and the interpreter exits.
    """

    # (free-memory threshold, safety margin) pairs, from the most comfortable
    # situation down to the bare minimum; both figures are in the same units
    # as memory()['free']. The first threshold exceeded by the free memory
    # selects the margin held back before dividing the rest among 'n'
    # buffers of 4-byte entries.
    memory_brackets = ((60000000, 10000000),
                       (40000000, 7000000),
                       (14000000, 2000000),
                       (8000000, 1400000),
                       (2000000, 900000),
                       (1000000, 400000))

    mem_free = memory()['free']
    for threshold, margin in memory_brackets:
        if mem_free > threshold:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array about to be stored; must be positive.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to the amount of free memory,
        in which case no compression is applied.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the data is large compared to the free memory.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Bug fix: the PyTables class is 'tables.Filters' (capital F);
            # 'tables.filters' is the submodule of the same name and is not
            # callable, so the previous code raised a TypeError whenever
            # this branch was reached.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on LZO if the Blosc compression library is rejected.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row records the cluster IDs assigned to the samples by one of
        the partitions of the ensemble.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Performance fix: stack all membership matrices in one call. The former
    # approach grew the matrix with scipy.sparse.vstack inside the loop,
    # copying the whole accumulated matrix at every iteration -- quadratic
    # work in the number of partitions.
    hypergraph_adjacency = scipy.sparse.vstack(
        [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)],
        format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.

    The four CSR components of the matrix ('data', 'indices', 'indptr' and
    'shape') are each stored as a compressed array under '/consensus_group'
    in the given HDF5 file, overwriting any same-named node left over from
    a previous call.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    raw_byte_size = (hypergraph_adjacency.data.nbytes
                     + hypergraph_adjacency.indices.nbytes
                     + hypergraph_adjacency.indptr.nbytes)
    FILTERS = get_compression_filter(raw_byte_size)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for attribute in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale node of the same name before re-creating it
            # (natural naming raises an AttributeError subclass when the
            # node does not exist yet).
            try:
                stale_node = getattr(fileh.root.consensus_group, attribute)
                stale_node._f_remove()
            except AttributeError:
                pass

            component = np.array(getattr(hypergraph_adjacency, attribute))
            storage = fileh.create_carray(fileh.root.consensus_group, attribute,
                                          tables.Atom.from_dtype(component.dtype),
                                          component.shape, filters = FILTERS)
            storage[:] = component
def load_hypergraph_adjacency(hdf5_file_name):
    """Read back from disk the hypergraph adjacency matrix previously saved
    by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        data, indices, indptr, shape = [
            getattr(fileh.root.consensus_group, attribute).read()
            for attribute in ('data', 'indices', 'indptr', 'shape')]

    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Aggregate an ensemble of partitions into a single consensus clustering.

    Up to three consensus heuristics (CSPA, HGPA and MCLA) are applied to
    the ensemble; the labelling retained is the one whose average mutual
    information with the partitions of the ensemble is highest.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'

    # (Re-)create the HDF5 scratch file that holds every large intermediate
    # array produced by the consensus functions.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    # CSPA builds a dense n_samples x n_samples co-association matrix and is
    # therefore skipped for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    candidate_labellings = []
    scores = []
    for name, consensus_function in zip(function_names, consensus_functions):
        candidate_labellings.append(
            consensus_function(hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        scores.append(ceEvalMutual(cluster_runs, candidate_labellings[-1], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(name, scores[-1]))
        print('*****')

    # Keep the candidate labelling with the highest mutual information score.
    return candidate_labellings[np.argmax(scores)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # Promote a single labelling vector to a 1 x n_samples matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_weighted_info = 0
    total_labelled = 0

    for run in cluster_runs:
        # Restrict the comparison to the samples labelled in this run.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose), newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose), newshape = n_labelled)

        # Weight each run's mutual information by its number of labelled samples.
        total_weighted_info += normalized_mutual_info_score(consensus_part, run_part) * n_labelled
        total_labelled += n_labelled

    return float(total_weighted_info) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Validate a vector of cluster labels and normalize it if needed.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The possibly repaired labelling: negative labels are offset so the
        smallest one becomes 0, and labellings with gaps are remapped so
        the cluster IDs densely cover 0 .. max label.

    Raises
    ------
    ValueError
        For an empty input, a non-vector array, or any NaN entry.
    """

    cluster_run = np.asanyarray(cluster_run)

    # Guard clauses: reject inputs the rest of the pipeline cannot handle.
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    if np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")

    # Shift any negative labelling so the smallest label becomes zero.
    smallest_label = np.amin(cluster_run)
    if smallest_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        cluster_run -= smallest_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")

    # Remap to a dense range of integers if the labelling has gaps.
    dense_version = one_to_max(cluster_run)
    if np.amax(cluster_run) != np.amax(dense_version):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = dense_version
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array of integers
        A massaged version of the input vector of cluster identities:
        each output label is the rank of the corresponding input label
        among the sorted distinct input labels, so the output densely
        covers 0, 1, ..., n_distinct_labels - 1.
    """

    flattened = np.asanyarray(array_in).reshape(-1)

    # np.unique already computes, for every element, its index within the
    # sorted array of distinct values -- exactly the dense relabelling that
    # was previously re-implemented here by hand with argsort and a scan.
    # (NaN entries are assumed absent; see the docstring.)
    _, dense_labels = np.unique(flattened, return_inverse = True)

    # Flatten and cast so the result keeps the historical contract of a
    # one-dimensional array of dtype 'int'.
    return dense_labels.reshape(-1).astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Returns
    -------
    similarities : array of shape (n_samples, n_samples)
        The sanitized similarity matrix. Clamping to [0, 1] and fixing of
        the diagonal happen in place on the input array, but dropping
        imaginary parts, cropping a rectangular matrix and symmetrization
        rebind the local variable; the result is therefore returned so
        that callers can always access the fully corrected matrix.

    Raises
    ------
    ValueError
        If the matrix is empty or contains any NaN or infinite entry.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Discard any imaginary components.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Crop a rectangular matrix down to its largest leading square block.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clamp every entry into the valid range [0, 1] (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Enforce symmetry by averaging the matrix with its transpose.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force the self-similarities to exactly one (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")

        # Bug fix: previously nothing was returned, so the repairs performed
        # by rebinding (real part, cropping, symmetrization) were silently
        # lost to the caller.
        return similarities
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file expected to hold, under '/consensus_group', the hypergraph
        adjacency matrix previously saved by 'store_hypergraph_adjacency'.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the largest cluster ID appearing in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes a dense n_samples x n_samples co-association matrix,
    # hence the hard cap on the number of samples.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    # Co-association counts: entry (i, j) of 's' is the number of partitions
    # in which samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Sanity-check s / N_runs as a similarity matrix. NOTE(review): some of
    # the repairs made inside 'checks' operate on the temporary argument
    # array and are not reflected back into 's' here -- confirm intended.
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale the similarities to integer-friendly edge weights for METIS.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round the scaled similarities chunk by chunk to bound memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    The actual work is delegated to 'hmetis', which partitions the hypergraph
    whose adjacency matrix was previously stored in the given HDF5 file.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs : array of shape (n_partitions, n_samples)
        Only used here to infer a default value for 'N_clusters_max'.

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Outline: (1) each cluster of each partition is a hyper-edge of the
    hypergraph adjacency matrix stored in the HDF5 file; (2) the hyper-edges
    are compared through a matrix of pairwise Jaccard similarities and
    grouped into meta-clusters by the external partitioner wrapped in
    'cmetis' (defined elsewhere in this module); (3) each meta-cluster is
    collapsed into a vector of mean memberships; (4) every sample competes
    for the meta-cluster in which it participates most strongly.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the hypergraph adjacency matrix under
        '/consensus_group'; used as scratch space for large intermediates.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: the size of each cluster across all partitions.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # For binary rows, |A & B| is the inner product and
        # |A | B| = |A| + |B| - |A & B|, computed chunk by chunk below.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            # Round to integer weights, as expected by the graph partitioner.
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    # Row k of 'clb_cum' is the mean of the hyper-edges assigned to
    # meta-cluster k, processed in memory-bounded chunks.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Locate samples with no association to any meta-cluster at all.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Give all-zero samples random associations so they can still compete.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    # Add a tiny random jitter to break ties deterministically downstream.
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    # Column-normalize 'clb_cum' so each sample's associations sum to one.
    # NOTE(review): the normalizer 'sum_diag' is computed from 'tmp' (the
    # jitter matrix), not from the jittered 'clb_cum' itself -- confirm
    # this is the intended normalization.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    # Assign every sample to the meta-cluster holding its maximum
    # (jitter-perturbed, normalized) association.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # NOTE(review): the message below carries a stray leading apostrophe.
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster labels. Non-finite entries (e.g. NaN, tagging
        samples left out of this clustering) are ignored.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_clusters, n_samples); entry (k, j) is 1 if and only if sample j
    belongs to the k-th cluster (clusters ordered by sorted label value).

    Raises
    ------
    ValueError
        If 'cluster_run' is not effectively one-dimensional.
    """

    cluster_run = np.asanyarray(cluster_run)

    # The input must be a vector, possibly padded with singleton axes.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    else:
        cluster_run = cluster_run.reshape(cluster_run.size)

    # Non-finite labels denote samples excluded from this particular run.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

    # Collect each cluster's member indices once, then assemble the CSR
    # components in a single pass. This avoids the quadratic cost of
    # repeatedly copying a growing array with np.append.
    index_chunks = [np.where(cluster_run == elt)[0] for elt in cluster_ids]

    indptr = np.cumsum([0] + [chunk.size for chunk in index_chunks])
    if index_chunks:
        indices = np.concatenate(index_chunks)
    else:
        indices = np.empty(0, dtype = np.int32)

    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity
    graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Write the CSPA similarity graph to disk, partition it, then clean up.
    graph_file = wgraph(hdf5_file_name)
    partition_labels = sgraph(N_clusters_max, graph_file)
    subprocess.call(['rm', graph_file])

    return partition_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional hyper-edge weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the HGPA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    # Method code 2 writes an unweighted hypergraph, 3 a weighted one.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    partition_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])

    return partition_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional vertex weights for the MCLA meta-graph.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the MCLA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Method code 1 writes the MCLA meta-graph with vertex weights 'w'.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])

    return partition_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding, under node 'consensus_group', the similarity
        matrix (methods 0 and 1) or the hypergraph adjacency (methods 2, 3).

    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).

    method : int, optional (default = 0)
        0 -> CSPA similarity graph; 1 -> MCLA meta-graph;
        2 or 3 -> HGPA hypergraph (3 uses the weights in 'w').

    Returns
    -------
    file_name : string
        Name of the graph file written to the current working directory,
        or the sentinel 'DO_NOT_PROCESS' when the CSPA graph has no edge.
    """

    print('\n#')
    # Select the edge matrix and output file name according to 'method'.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        # For HGPA the transposed hypergraph adjacency acts as the edge
        # matrix: rows index vertices (samples), columns index hyper-edges.
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # Zero the self-similarities so no vertex receives a self-loop.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # gpmetis expects integral vertex weights: rescale then round.
        # NOTE(review): 'w_sum_before' is computed but never used, and
        # 'w *= scale_factor' mutates the caller's array in place when
        # 'w' is a NumPy array — TODO confirm both are intentional.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # METIS header: '<N_vertices> <N_edges> <fmt>'; fmt '1' flags
            # weighted edges. Each undirected edge is stored twice in the
            # symmetric matrix, hence the division by 2.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # No edge at all: signal the caller to skip partitioning.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the positive entries chunk-by-chunk to bound memory use.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            # fmt '11' flags both vertex and edge weights.
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: '<N_hyper-edges> <N_vertices> ...'.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One output line per vertex: [vertex weight] followed by
            # interleaved (neighbour, edge weight) pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One output line per hyper-edge: its weight followed by the
            # 1-based indices of the vertices it spans.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix is no longer needed; reclaim the HDF5 node.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        The number of parts requested from the graph partitioner.

    file_name : string
        Name of the graph file produced by 'wgraph'; must be one of
        'wgraph_CSPA', 'wgraph_MCLA' or 'wgraph_HGPA', or the sentinel
        'DO_NOT_PROCESS'.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list when given the sentinel.
    """

    # 'wgraph' returns this sentinel when the CSPA graph had no edges.
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # METIS/hMETIS write their partition vector to '<graph_file>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries exist only for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is the imbalance factor (UBfactor) passed to shmetis.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        # gpmetis is assumed to be available on the system PATH.
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Remap the partition IDs onto a dense range starting at zero.
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store; an 'overlap_matrix' extendable array is created under
        its 'consensus_group' node.

    consensus_labels : array of shape (n_samples,)
        Consensus cluster assignment for each sample.

    cluster_runs : array of shape (n_partitions, n_samples)
        The ensemble of clusterings; one row per independent run.

    Returns
    -------
    cluster_dims_list : list
        0 followed by the number of distinct cluster IDs in each run.

    mutual_info_list : list
        Mutual information between each run and the consensus labelling.

    consensus_adjacency : compressed sparse row matrix
        Membership indicator matrix of the consensus clustering, of shape
        (n_consensus_clusters, n_samples).
    """

    # Promote a flat label vector to a single-run 2-D array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR components of the consensus membership matrix: row k
    # lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Extendable array: one block of rows appended per run below.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only samples with a finite, non-negative label for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' here accumulates the members of ALL
            # clusters processed so far, so 'unions' for cluster c is taken
            # against that cumulative set rather than cluster c's members
            # alone — TODO confirm this is the intended union size.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Intersection sizes between this run's clusters and the consensus
        # clusters, via a sparse matrix product of the two indicator matrices.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | build_hypergraph_adjacency | python | def build_hypergraph_adjacency(cluster_runs):
N_runs = cluster_runs.shape[0]
hypergraph_adjacency = create_membership_matrix(cluster_runs[0])
for i in range(1, N_runs):
hypergraph_adjacency = scipy.sparse.vstack([hypergraph_adjacency,
create_membership_matrix(cluster_runs[i])],
format = 'csr')
return hypergraph_adjacency | Return the adjacency matrix to a hypergraph, in sparse matrix representation.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
hypergraph_adjacency : compressed sparse row matrix
Represents the hypergraph associated with an ensemble of partitions,
each partition corresponding to a row of the array 'cluster_runs'
provided at input. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L160-L183 | [
"def create_membership_matrix(cluster_run):\n \"\"\"For a label vector represented by cluster_run, constructs the binary \n membership indicator matrix. Such matrices, when concatenated, contribute \n to the adjacency matrix for a hypergraph representation of an \n ensemble of clusterings.\n\n Parameters\n ----------\n cluster_run : array of shape (n_partitions, n_samples)\n\n Returns\n -------\n An adjacnecy matrix in compressed sparse row form.\n \"\"\"\n\n cluster_run = np.asanyarray(cluster_run)\n\n if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):\n raise ValueError(\"\\nERROR: Cluster_Ensembles: create_membership_matrix: \"\n \"problem in dimensions of the cluster label vector \"\n \"under consideration.\")\n else:\n cluster_run = cluster_run.reshape(cluster_run.size)\n\n cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))\n\n indices = np.empty(0, dtype = np.int32)\n indptr = np.zeros(1, dtype = np.int32)\n\n for elt in cluster_ids:\n indices = np.append(indices, np.where(cluster_run == elt)[0])\n indptr = np.append(indptr, indices.size)\n\n data = np.ones(indices.size, dtype = int)\n\n return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size))\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """

    return {key: int(value)
            for key, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    mem_free = memory()['free']
    # Each tier pairs a free-memory threshold with the safety margin to
    # hold back before computing the chunk size; largest threshold first.
    tiers = [
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    ]
    for threshold, reserve in tiers:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    # Below the smallest tier the machine cannot host even a small chunk.
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Positive number of bytes the array to be stored would occupy.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in free memory and
        compression is therefore skipped.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array would be large relative to the free
    # memory (memory()['free'] is expressed in kilobytes, hence the 1000).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: the filter class is 'tables.Filters' (capital F);
            # 'tables.filters' is a module and calling it raises TypeError.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if the Blosc library is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering of the samples per row.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Stack all membership matrices with a single vstack call. Repeatedly
    # vstack-ing pairs, as done previously, re-copies the accumulated matrix
    # at every iteration and is quadratic in the total number of entries.
    hypergraph_adjacency = scipy.sparse.vstack(
        [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)],
        format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
        The matrix is decomposed into its 'data', 'indices', 'indptr' and
        'shape' components, each stored as a separate array under the
        'consensus_group' node of the HDF5 file.

    hdf5_file_name : file handle or string
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            try:
                # Drop any stale array left over from a previous run
                # before re-creating it below.
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reconstruct the hypergraph adjacency previously written to disk
    by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    component_names = ('data', 'indices', 'indptr', 'shape')
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        components = [getattr(group, name).read() for name in component_names]

    hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(components[:3]),
                                                   shape = components[3])

    return hypergraph_adjacency
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    # Create (or overwrite) the HDF5 scratch file that backs every array
    # too large to keep in memory; all arrays live under 'consensus_group'.
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA materializes an N_samples x N_samples similarity matrix, which
    # becomes prohibitive beyond ~10000 samples; drop it for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # All consensus functions share the same hypergraph adjacency; build it
    # once and persist it so each heuristic can stream it back from disk.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    # Run each heuristic, scoring its result against the ensemble by
    # weighted average mutual information.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the consensus labelling that agrees best with the ensemble.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # A flat label vector stands for a single partition: make it 2-D.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_weighted_score = 0
    total_labelled = 0
    for run in cluster_runs:
        # Restrict both labellings to the samples this run actually labelled.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size
        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose),
                              newshape = n_labelled)
        # Weight each run's mutual information by its number of labelled samples.
        total_weighted_score += normalized_mutual_info_score(consensus_part, run_part) * n_labelled
        total_labelled += n_labelled

    return float(total_weighted_score) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, not one-dimensional, or contains NaN entries.
    """

    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # BUG FIX: the two messages below previously misspelt the function
    # name as 'checkl'; corrected to 'checkcl'.
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift negative labels so that the smallest label becomes 0.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Densify the labelling (remove gaps between consecutive labels).
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        labels are remapped onto 0, 1, ..., k-1, preserving the sorted
        order of the original distinct values.
    """

    flattened = np.asanyarray(array_in).reshape(-1)

    # np.unique performs exactly the sort-and-relabel pass the previous
    # hand-rolled loop implemented: 'inverse' maps every entry to the rank
    # of its value among the sorted distinct labels.
    _, inverse = np.unique(flattened, return_inverse = True)

    # reshape(-1) guards against NumPy versions in which 'return_inverse'
    # mirrors the input's (possibly multi-dimensional) shape.
    return inverse.reshape(-1).astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.
        Out-of-range entries and a non-unit diagonal are repaired IN PLACE.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.

    Notes
    -----
    NOTE(review): the complex-truncation, square-cropping and symmetrization
    repairs below rebind the local name 'similarities' rather than mutating
    the array, so those changes are NOT visible to the caller; only the
    clipping of out-of-range entries and the resetting of the diagonal
    propagate back — TODO confirm this asymmetry is intentional.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Truncate complex entries to their real parts (rebinds the local).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Crop a non-square matrix to its largest leading square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clip entries to the valid similarity range [0, 1] (in place).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (rebinds the local).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest cluster ID in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    # 'is None' is the correct identity test ('== None' can misbehave with
    # objects overriding __eq__, e.g. NumPy arrays).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes a dense (N_samples x N_samples) similarity matrix below.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    # Co-association matrix: entry (i, j) counts the partitions in which
    # samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Sanity-check the run-normalized similarities before scaling.
    checks(np.divide(s, float(N_runs)), verbose)
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round chunk by chunk: 'wgraph' later writes these entries as
        # integer edge weights for METIS.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    cluster_runs: array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest cluster ID in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # Identity test instead of '== None' (idiomatic and safe for any argument type).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the highest cluster ID in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    # Identity test instead of '== None'.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: size of each cluster across all runs.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between
    # the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            # Jaccard = |A & B| / (|A| + |B| - |A & B|), scaled to [0, 100].
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges.
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Break ties for unassociated objects with random memberships.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    # Jitter the memberships slightly so that arg-max ties below are broken at random.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize each column so the memberships behave like posterior probabilities.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # Fixed a stray leading apostrophe in the original log message.
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        Vector of cluster labels; NaN entries mark samples that were not
        clustered in this run and are ignored.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_clusters, n_samples), with a 1 at (c, i) iff sample i belongs
    to cluster c.

    Raises
    ------
    ValueError
        If 'cluster_run' is not effectively one-dimensional.
    """
    cluster_run = np.asanyarray(cluster_run)
    # A genuine vector has its total size equal to its largest dimension.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Ignore NaN entries (samples left out of this clustering run).
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Collect member indices per cluster; concatenating once at the end avoids
    # the quadratic cost of repeated np.append calls.
    members_per_cluster = []
    indptr = [0]
    running_total = 0
    for cluster_id in cluster_ids:
        members = np.where(cluster_run == cluster_id)[0]
        members_per_cluster.append(members)
        running_total += members.size
        indptr.append(running_total)
    if members_per_cluster:
        indices = np.concatenate(members_per_cluster)
    else:
        indices = np.empty(0, dtype = np.int32)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # 'wgraph' returns the sentinel 'DO_NOT_PROCESS' when the similarity graph
    # is empty; in that case no graph file exists, so there is nothing to remove.
    if file_name != 'DO_NOT_PROCESS':
        subprocess.call(['rm', file_name])
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights; when provided, the weighted
        hypergraph format is written out.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 selects the unweighted hypergraph format, 3 the weighted one.
    graph_format = 2 if w is None else 3
    graph_file = wgraph(hdf5_file_name, w, graph_format)
    partition = sgraph(N_clusters_max, graph_file)
    partition = one_to_max(partition)
    subprocess.call(['rm', graph_file])
    return partition
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional vertex weights passed through to the graph writer.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 writes the weighted MCLA meta-graph.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return partition
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
        Vertex (method 1) or hyper-edge (method 3) weights.
    method : int, optional (default = 0)
        0: CSPA similarity graph; 1: MCLA meta-graph;
        2/3: HGPA hypergraph (unweighted/weighted).

    Returns
    -------
    file_name : string
        Name of the graph file written, or the sentinel 'DO_NOT_PROCESS'
        when the CSPA similarity graph has no edges.
    """
    print('\n#')
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # No self-loops: zero out the diagonal of the similarity matrix.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
        if method == 1:
            # METIS expects integral vertex weights.
            scale_factor = 100.0
            w *= scale_factor
            w = np.rint(w)
    with open(file_name, 'w') as out_file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # Edge count (each undirected edge appears twice in the matrix).
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # Empty graph: close the HDF5 handle before bailing out
                # (the original leaked it here).
                fileh.close()
                return 'DO_NOT_PROCESS'
            else:
                out_file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            out_file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            out_file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        # Alternating (vertex, weight) pairs.
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        # Vertex weight first, then (vertex, weight) pairs.
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        out_file.write('{} '.format(int(elt)))
                    out_file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        out_file.write('{} '.format(int(elt)))
                    out_file.write('\n')
    if method in {0, 1}:
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of partitions requested from the (hyper-)graph partitioner.
    file_name : string
        Name of the graph file produced by 'wgraph'; also selects which
        external partitioning program to invoke.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    """
    # 'wgraph' signals an empty CSPA similarity graph with this sentinel.
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Pass arguments as a list (no shell): safer than a shell string and
        # robust to spaces in 'shmetis_path'. Trailing '15' is the UBfactor.
        subprocess.call([shmetis_path, './' + file_name, k, '15'])
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        subprocess.call(['gpmetis', './' + file_name, k])
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # np.loadtxt opens the partition file itself; the extra 'open' wrapper
    # in the original was redundant.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list
        Starts with 0, followed by the number of clusters in each run.
    mutual_info_list : list
        One mutual-information score per run (via 'ceEvalMutual').
    consensus_adjacency : compressed sparse row matrix
        Membership indicator matrix of the consensus clustering.
    """
    # Promote a flat label vector to a single-run 2-D array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR membership matrix of the consensus clustering by
    # accumulating, per consensus label, the indices of its member samples.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    # NOTE(review): 'fileh' is not protected by try/finally, so an exception
    # below leaves the HDF5 file open — confirm whether this matters to callers.
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Local name shadows this function's own name; it is an HDF5 extendable array.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' accumulates across clusters, so the union
            # computed for cluster c also contains the members of clusters
            # 0..c-1 of this run — verify this is the intended behavior.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Pairwise intersection sizes between this run's clusters and the
        # consensus clusters, obtained by a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        # Jaccard-style overlap: |intersection| / |union|, appended per run.
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | store_hypergraph_adjacency | python | def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
FILTERS = get_compression_filter(byte_counts)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
for par in ('data', 'indices', 'indptr', 'shape'):
try:
n = getattr(fileh.root.consensus_group, par)
n._f_remove()
except AttributeError:
pass
array = np.array(getattr(hypergraph_adjacency, par))
atom = tables.Atom.from_dtype(array.dtype)
ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
array.shape, filters = FILTERS)
ds[:] = array | Write an hypergraph adjacency to disk to disk in an HDF5 data structure.
Parameters
----------
hypergraph_adjacency : compressed sparse row matrix
hdf5_file_name : file handle or string | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L186-L215 | [
"def get_compression_filter(byte_counts):\n \"\"\"Determine whether or not to use a compression on the array stored in\n a hierarchical data format, and which compression library to use to that purpose.\n Compression reduces the HDF5 file size and also helps improving I/O efficiency\n for large datasets.\n\n Parameters\n ----------\n byte_counts : int\n\n Returns\n -------\n FILTERS : instance of the tables.Filters class\n \"\"\"\n\n assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0\n\n if 2 * byte_counts > 1000 * memory()['free']:\n try:\n FILTERS = tables.filters(complevel = 5, complib = 'blosc', \n shuffle = True, least_significant_digit = 6)\n except tables.FiltersWarning:\n FILTERS = tables.filters(complevel = 5, complib = 'lzo', \n shuffle = True, least_significant_digit = 6) \n else:\n FILTERS = None\n\n return FILTERS\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """
    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(amount) for field, amount in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    # Pairs of (free-memory threshold in kB, safety margin in kB), checked
    # from largest to smallest; these replicate the original if/elif ladder.
    _THRESHOLDS = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )
    mem_free = memory()['free']
    for mem_threshold, mem_margin in _THRESHOLDS:
        if mem_free > mem_threshold:
            # Remaining memory (in bytes, hence * 1000) divided among
            # 'n' float32 arrays of width 'N'.
            return int(((mem_free - mem_margin) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array about to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits into free memory, in which case
        compression is not worth the CPU overhead.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    if 2 * byte_counts > 1000 * memory()['free']:
        # BUG FIX: the PyTables filter class is 'tables.Filters' (capital F);
        # 'tables.filters' is a module, and calling it raised a TypeError
        # whenever this branch was reached.
        try:
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to the LZO compressor if Blosc is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    N_runs = cluster_runs.shape[0]
    # Build all membership matrices first and stack them in a single call:
    # repeated pairwise vstack operations are quadratic in the number of runs.
    membership_matrices = [create_membership_matrix(cluster_runs[i])
                           for i in range(N_runs)]
    hypergraph_adjacency = scipy.sparse.vstack(membership_matrices, format = 'csr')
    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.

    The four arrays underlying the CSR representation ('data', 'indices',
    'indptr' and 'shape') are each stored as a separate compressed array
    under the file's 'consensus_group' node, replacing any previous versions.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    # Total size of the three CSR component arrays, used to decide whether
    # compression is worthwhile.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale node left over from a previous call before
            # re-creating it.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                # The node does not exist yet: nothing to remove.
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reconstruct the hypergraph adjacency matrix previously stored
    by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    # Read-only access suffices here; opening with 'r' (instead of 'r+')
    # also works on files lacking write permission.
    with tables.open_file(hdf5_file_name, 'r') as fileh:
        pars = []
        for par in ('data', 'indices', 'indptr', 'shape'):
            pars.append(getattr(fileh.root.consensus_group, par).read())
    # (data, indices, indptr) triple plus the stored shape rebuild the CSR matrix.
    hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
    return hypergraph_adjacency
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information
    score between its vector of consensus labels and the vectors of labels
    associated to each partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Create (or truncate) the HDF5 store and the group under which all
    # intermediate arrays used by the consensus functions will live.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA materializes a dense (n_samples x n_samples) similarity matrix,
    # which is impractical for large data-sets; skip it beyond 10000 samples.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # The hypergraph adjacency matrix is shared by all consensus functions;
    # build it once and persist it to the HDF5 store.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its average mutual information
        # with the individual partitions of the ensemble.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the consensus partition with the highest mutual information score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0
    # A flat vector (product of dims equals the largest dim) is promoted
    # to a single-run (1, n_samples) matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    weighted_average_mutual_information = 0
    N_labelled_indices = 0
    for i in range(cluster_runs.shape[0]):
        # Samples skipped in this run are tagged with NaN; score only the rest.
        labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
        N = labelled_indices.size
        # 'checkcl' normalizes both labellings to dense non-negative IDs.
        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
        q = normalized_mutual_info_score(x, y)
        # Weight each run by its number of labelled samples.
        weighted_average_mutual_information += q * N
        N_labelled_indices += N
    return float(weighted_average_mutual_information) / N_labelled_indices
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, is not one-dimensional, or contains any NaN.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # A vector has as many entries as its largest dimension; anything else
    # is a genuinely multi-dimensional array and is rejected.
    # (The error messages below previously misnamed this function 'checkl'.)
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift negative labellings so that the smallest label becomes 0.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Collapse any gaps in the labelling into a dense 0..k mapping.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Remap a vector of cluster labels onto the dense range 0..k.

    Equal input labels receive equal output labels, and the output labels
    respect the sorted order of the distinct input values, starting at 0
    with no gaps. Given that this function is herein always called after
    passing a vector to the function checkcl, it relies on the assumption
    that its input does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    flat = np.asanyarray(array_in).reshape(-1)
    # Visit the entries in ascending order of value; bump the dense rank
    # whenever a new distinct value is encountered.
    order = np.argsort(flat)
    dense = np.empty(flat.size, dtype = int)
    previous = np.nan
    rank = -1
    for position in order:
        value = flat[position]
        if previous != value or np.isnan(previous):
            rank += 1
            previous = value
        dense[position] = rank
    return dense
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    The out-of-range clipping and the unit-diagonal fix below mutate the
    caller's array in place. NOTE(review): the complex-truncation,
    square-sub-matrix extraction and symmetrization steps only rebind the
    local name 'similarities'; since this function returns None, those
    particular corrections never reach the caller -- confirm whether this
    is intended.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty, or contains any NaN or infinite entry.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Truncate complex entries to their real components
        # (rebinds the local only; see the note in the docstring).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # A non-square matrix is cut down to its largest leading square block
        # (rebinds the local only).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        # Clip entries into [0, 1]; these assignments are in place and
        # therefore visible to the caller.
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (rebinds the local only).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities; in place and visible to the caller.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix; also receives
        the dense co-association ('similarities_CSPA') matrix built here.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the largest label observed across the ensemble.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    # Identity comparison is the idiomatic (PEP 8) None test; '== None' can
    # misbehave with operands overriding __eq__ (e.g. NumPy arrays).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA builds a dense (N_samples x N_samples) matrix; guard against
    # data-sets for which this would be intractable.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Co-association counts: s[i, j] = number of runs clustering i with j.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Sanity-check the normalized co-association matrix.
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale to a fixed total mass so that METIS receives integer weights
    # of comparable magnitude regardless of the data-set size.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round chunk by chunk to keep the memory footprint bounded.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the hypergraph adjacency matrix.

    cluster_runs : array of shape (n_partitions, n_samples)
        Only used here to determine a default for 'N_clusters_max'.

    verbose : bool, optional (default = False)
        Unused by HGPA itself; kept for interface uniformity with CSPA and MCLA.

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the largest label observed across the ensemble.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # Identity comparison is the idiomatic (PEP 8) None test; '== None' can
    # misbehave with operands overriding __eq__ (e.g. NumPy arrays).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Clusters the hyper-edges of the ensemble's hypergraph into meta-clusters
    (via pairwise Jaccard similarities and graph partitioning), collapses
    each meta-cluster into a mean membership vector, then assigns every
    sample to its most associated meta-cluster.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of meta-clusters; defaults to one more than the largest
        label observed across the ensemble.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    # Identity comparison is the idiomatic (PEP 8) None test; '== None' can
    # misbehave with operands overriding __eq__ (e.g. NumPy arrays).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: the size of each cluster across the ensemble.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # Intersection sizes between every pair of hyper-edges.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            # Jaccard: |A & B| / (|A| + |B| - |A & B|), scaled and rounded
            # to integers for the graph partitioner.
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    # Collapse each meta-cluster into the mean of its member hyper-edges.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    # Samples with no association at all get random memberships so that
    # the normalization below never divides by zero.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    # Tiny random jitter breaks ties in the upcoming argmax competition.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize each column so memberships behave like posterior probabilities.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # Fixed a stray leading apostrophe that used to prefix this message.
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary cluster-membership indicator matrix for one labelling.

    Each row of the returned matrix corresponds to one cluster ID and flags
    the samples assigned to that cluster. Stacking such matrices across runs
    contributes to the adjacency matrix of the hypergraph representation of
    an ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    The membership indicator matrix in compressed sparse row form.

    Raises
    ------
    ValueError
        If the input is not a one-dimensional vector of labels.
    """
    labels = np.asanyarray(cluster_run)
    # A vector has as many entries as its largest dimension.
    if reduce(operator.mul, labels.shape, 1) != max(labels.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    labels = labels.reshape(labels.size)
    # Only finite labels take part; NaN marks samples left out of this run.
    cluster_ids = np.unique(np.compress(np.isfinite(labels), labels))
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for cluster_id in cluster_ids:
        indices = np.append(indices, np.where(labels == cluster_id)[0])
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, labels.size))
def metis(hdf5_file_name, N_clusters_max):
    """Partition the induced similarity graph passed by CSPA with the
    METIS algorithm by Karypis and Kumar.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    partition_labels = sgraph(N_clusters_max, graph_file)
    # Remove the intermediate graph file once partitioning is done.
    subprocess.call(['rm', graph_file])
    return partition_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Give cluster labels ranging from 1 to N_clusters_max for the
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Hyper-edge weights; when provided, the weighted variant of the
        graph file is emitted.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the HGPA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method 2 writes an unweighted hypergraph, method 3 a weighted one.
    graph_method = 2 if w is None else 3
    graph_file = wgraph(hdf5_file_name, w, graph_method)
    partition_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    # Remove the intermediate graph file once partitioning is done.
    subprocess.call(['rm', graph_file])
    return partition_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Return cluster labellings ranging from 1 to N_clusters_max
    for the hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Vertex weights forwarded to the graph-file writer.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the MCLA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    # Remove the intermediate graph file once partitioning is done.
    subprocess.call(['rm', graph_file])
    return partition_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)
        Vertex (or hyper-edge) weights, used by methods 1 and 3.

    method : int, optional (default = 0)
        0 -- graph from the CSPA similarity matrix (edge weights only);
        1 -- graph from the MCLA Jaccard similarity matrix (vertex and
        edge weights);
        2 and 3 -- hypergraph from the hypergraph adjacency matrix
        (3 uses the hyper-edge weights given in 'w').

    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA similarity graph has no edge.

    Raises
    ------
    ValueError
        If 'method' is not one of 0, 1, 2 or 3.
    """
    print('\n#')
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        # Transposed so that columns enumerate the hyper-edges.
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # Graph partitioning ignores self-loops: zero out the diagonal.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS requires integer vertex weights: rescale, then round.
        # (An unused 'w_sum_before' bookkeeping variable has been removed.)
        scale_factor = 100.0
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # Number of undirected edges; each is counted twice in the matrix.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # Nothing to partition. Close the HDF5 handle before bailing
                # out (it used to be leaked on this early exit).
                fileh.close()
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the positive entries chunk by chunk to bound memory use.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # Hypergraph header: number of hyper-edges, number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        # Vertex weight first, then (neighbour, weight) pairs.
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix has served its purpose; reclaim the space.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Run METIS or hMETIS and return the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the partitioner.

    file_name : string
        Name of the graph file written by 'wgraph'; also selects which
        partitioner is invoked ('shmetis' for 'wgraph_HGPA', 'gpmetis'
        for 'wgraph_CSPA' and 'wgraph_MCLA').

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of any of three approximation algorithms for
        consensus clustering (either of CSPA, HGPA or MCLA).

    Raises
    ------
    NameError
        If 'file_name' is none of the recognized graph-file names.
    """
    # 'wgraph' returns this sentinel when the CSPA graph had no edge at all.
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The partitioners write their result to '<graph file>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries only exist for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is shmetis's third positional argument --
        # presumably the UBfactor load-imbalance setting; confirm against
        # the hMETIS manual.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    # NOTE(review): the file handle below is never used -- 'np.loadtxt'
    # reopens 'out_name' itself; the with-block seemingly only ensures the
    # partitioner's output file exists before parsing it.
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Remap the partitioner's labels onto a dense 0..k range.
    labels = one_to_max(labels)
    # Remove the partitioner's output file now that it has been parsed.
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store already holding a 'consensus_group' node; an
        'overlap_matrix' extendable array is appended under it.

    consensus_labels : array of shape (n_samples,)
        Consensus cluster ID for each sample.

    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering per row; non-clustered samples may be NaN or negative.

    Returns
    -------
    cluster_dims_list : list of int
        Starts with 0, then holds, per run, the number of distinct valid
        (finite, non-negative) cluster IDs found in that run.

    mutual_info_list : list of float
        Per run, the value of ceEvalMutual(run, consensus_labels).

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of shape (n_consensus_clusters, n_samples)
        for the consensus labelling.
    """
    # Promote a flat label vector to a single-row matrix of runs.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Hand-build the CSR components of the consensus membership matrix:
    # row k lists the samples assigned to consensus cluster k.
    # NOTE: assumes consensus_labels is a dense mapping 0..N_consensus_labels-1.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # NB: this local shadows the enclosing function's own name.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only samples with a finite, non-negative label in this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])

        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]

        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                # NOTE(review): 'indices' accumulates the members of every
                # cluster ID processed so far, so for c > 0 this union spans
                # several clusters, not just cluster 'elt' — looks suspicious;
                # confirm this is intended before relying on these values.
                unions[c, k] = np.union1d(indices, x).size
            c += 1

        data = np.ones(indices.size, dtype = int)
        # Membership matrix for this run's clusters over all samples.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # intersections[c, k] = |cluster_c ∩ consensus_k|.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | load_hypergraph_adjacency | python | def load_hypergraph_adjacency(hdf5_file_name):
with tables.open_file(hdf5_file_name, 'r+') as fileh:
pars = []
for par in ('data', 'indices', 'indptr', 'shape'):
pars.append(getattr(fileh.root.consensus_group, par).read())
hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
return hypergraph_adjacency | Parameters
----------
hdf5_file_name : file handle or string
Returns
-------
hypergraph_adjacency : compressed sparse row matrix | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L218-L237 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system,
        as reported by psutil, with each amount cast to a plain int.
    """
    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(amount) for field, amount in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        Exits the interpreter if free memory falls below the lowest bracket.
    """
    # (free-memory threshold, safety margin to hold back) pairs, from the
    # largest bracket down; the first bracket exceeded decides the margin.
    # These replace the original copy-pasted if/elif cascade; thresholds and
    # margins are unchanged.
    _BRACKETS = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )

    mem_free = memory()['free']
    for threshold, margin in _BRACKETS:
        if mem_free > threshold:
            # 4 bytes per float32 entry; the factor 1000 converts the units
            # reported by 'memory()' (presumably kilobytes — TODO confirm)
            # into bytes.
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes required to store the array under consideration.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to free memory and
        compression is therefore skipped.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array is large compared to the free memory
    # reported by 'memory()' (presumably kilobytes, hence the factor 1000
    # — TODO confirm the unit).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: 'tables.filters' is a module and is not callable;
            # the filter specification class is 'tables.Filters'.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if the Blosc compressor is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    # Build every per-run membership matrix first and stack once: the
    # original loop re-stacked the growing result at each iteration,
    # copying all previously accumulated rows every time.
    membership_matrices = [create_membership_matrix(cluster_run)
                           for cluster_run in cluster_runs]

    if len(membership_matrices) == 1:
        return membership_matrices[0]

    return scipy.sparse.vstack(membership_matrices, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    The four CSR components ('data', 'indices', 'indptr' and 'shape') are
    stored as separate, possibly compressed arrays under the
    'consensus_group' node, replacing any previously stored versions.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Decide whether compression pays off, based on the combined size of
    # the three underlying CSR buffers.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Drop any stale node of the same name before re-creating it;
            # AttributeError simply means no such node existed yet.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Read back from an HDF5 store the hypergraph adjacency matrix
    previously saved under its 'consensus_group' node.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    components = {}
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for name in ('data', 'indices', 'indptr', 'shape'):
            components[name] = getattr(fileh.root.consensus_group, name).read()

    return scipy.sparse.csr_matrix(
        (components['data'], components['indices'], components['indptr']),
        shape = components['shape'])
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Start from a fresh HDF5 store ('w' truncates any existing file) holding
    # a 'consensus_group' node under which the consensus functions cache
    # their large intermediate arrays.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA materializes a dense n_samples x n_samples similarity matrix and
    # is therefore skipped above this sample-count threshold.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # Persist the hypergraph once; every consensus function reloads it
    # from the HDF5 store.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its weighted mutual information
        # with the individual runs.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    # Keep the consensus labelling sharing the most information with the ensemble.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds the cluster IDs assigned by one clustering;
        samples left out of a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The consensus cluster ID of each sample of the whole data-set.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0

    # Promote a flat label vector to a single-row matrix of runs.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_weighted_mi = 0
    total_labelled = 0

    for run in cluster_runs:
        # Restrict the comparison to samples actually labelled in this run.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose), newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose), newshape = n_labelled)

        total_weighted_mi += normalized_mutual_info_score(consensus_part, run_part) * n_labelled
        total_labelled += n_labelled

    return float(total_weighted_mi) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    """
    cluster_run = np.asanyarray(cluster_run)

    # Reject degenerate inputs outright (guard clauses replace the
    # original if/elif/else cascade; messages are unchanged).
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    if np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")

    # Shift negative labellings so that the smallest label becomes zero.
    smallest_label = np.amin(cluster_run)
    if smallest_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        cluster_run -= smallest_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")

    # Densify gappy labellings into a contiguous 0..K-1 mapping.
    densified = one_to_max(cluster_run)
    if np.amax(cluster_run) != np.amax(densified):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = densified
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        each entry is replaced by the rank of its label among the sorted
        distinct labels, yielding labels 0..K-1 without gaps.
    """
    x = np.asanyarray(array_in).reshape(-1)

    # 'np.unique' sorts the distinct labels; 'return_inverse' then yields,
    # for each entry, the rank of its label among those sorted distinct
    # labels — exactly the dense relabelling the original Python-level
    # loop computed, but vectorized.
    _, inverse = np.unique(x, return_inverse = True)

    return inverse.reshape(x.size).astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    NOTE(review): several of the "repairs" below rebind the local name
    'similarities' (taking real parts, cropping to a square, symmetrizing)
    and therefore never propagate to the caller's array; only the in-place
    index assignments (clipping to [0, 1], fixing the diagonal) do.
    The function returns None, so callers relying on the rebound fixes
    would not see them — confirm this is intended.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Drop imaginary components, if any (local rebinding only).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Crop a rectangular matrix to its largest leading square block
        # (local rebinding only).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries into the valid similarity range [0, 1] (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")

            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose (local rebinding only).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")

            similarities = np.divide(similarities + np.transpose(similarities), 2.0)

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force unit self-similarities on the diagonal (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")

            similarities[np.diag_indices(similarities.shape[0])] = 1

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # The co-association matrix below is dense n_samples x n_samples;
    # refuse data-sets for which it would not fit.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    # s[i, j] counts, over all runs, how often samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Sanity-check the normalized co-association matrix; the (possibly
    # repaired) copy returned by 'checks' is discarded here.
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale similarities to integer edge weights summing to ~1e8, as
    # expected by the METIS graph file written downstream.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round the scaled similarities chunk by chunk to stay within memory.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID observed
        across all runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # Idiom fix: identity comparison to None ('is'), not equality ('==').
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # NOTE(review): '== None' works here, but 'is None' is the idiomatic test.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Hyper-edge sizes, later used as vertex weights when partitioning.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # squared_MCLA[i, j] = |edge_i ∩ edge_j|; combined with the row
        # sums, this gives the Jaccard index via |A∩B| / (|A| + |B| - |A∩B|).
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # Group the hyper-edges into meta-clusters by graph partitioning.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Row m of 'clb_cum' is the mean membership vector of the hyper-edges
    # that fell into meta-cluster m, computed chunk by chunk.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    # Locate samples with zero association to every meta-cluster, scanning
    # column blocks so the full matrix never has to be in memory at once.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Give unassociated samples random associations so they still get a label.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Add a tiny random jitter to break ties in the argmax competition below.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums, computed blockwise, to normalize each sample's
    # association vector into a probability distribution.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    # Assign each sample to the meta-cluster holding its maximal
    # (normalized, jittered) association; the winning value doubles as a
    # posterior probability.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster labels; non-finite entries (e.g. NaN markers
        for non-clustered samples) are skipped.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_distinct_finite_labels, n_samples).

    Raises
    ------
    ValueError
        If 'cluster_run' cannot be interpreted as a one-dimensional vector.
    """
    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")

    cluster_run = cluster_run.reshape(cluster_run.size)
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

    # Gather, per cluster ID, the positions of its members; concatenating
    # once at the end avoids the quadratic cost of repeatedly growing an
    # array with 'np.append'.
    rows_of_indices = [np.where(cluster_run == elt)[0] for elt in cluster_ids]

    indptr = np.zeros(len(rows_of_indices) + 1, dtype = np.int64)
    if rows_of_indices:
        indptr[1:] = np.cumsum([row.size for row in rows_of_indices])
        indices = np.concatenate(rows_of_indices).astype(np.int32)
    else:
        indices = np.empty(0, dtype = np.int32)

    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity
    graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of each sample according to the CSPA
        heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    cluster_labels = sgraph(N_clusters_max, graph_file)
    # Remove the intermediate graph file written by 'wgraph'.
    subprocess.call(['rm', graph_file])
    return cluster_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Give cluster labels ranging from 1 to N_clusters_max for the
    hypergraph partitioning required by HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of each sample according to the HGPA
        approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 lets 'wgraph' derive hyper-edge weights itself;
    # code 3 takes them from 'w'.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Return cluster labellings ranging from 1 to N_clusters_max
    for the graph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional vertex weights, forwarded to 'wgraph'.

    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of each sample according to the MCLA
        approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name, w, 1)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3);
        ignored for methods 0 and 2.
    method : int, optional (default = 0)
        0 -- CSPA similarity graph (edge weights only);
        1 -- MCLA meta-cluster similarity graph (vertex and edge weights);
        2, 3 -- HGPA hypergraph (2: edge weight is the column sum;
        3: edge weights taken from 'w').

    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA similarity matrix has no
        non-zero off-diagonal entry.
    """
    print('\n#')
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        # Hypergraph case: after the transposition, rows of 'e_mat' are
        # samples (vertices) and columns are clusters (hyper-edges).
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # Self-similarities are irrelevant to graph partitioning:
        # zero out the diagonal.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS expects integer weights: rescale, then round.
        scale_factor = 100.0
        w_sum_before = np.sum(w)  # NOTE(review): unused afterwards — confirm intent
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # METIS header: n_vertices n_edges fmt ('1' flags edge weights).
            # Each undirected edge appears twice in the matrix, hence the halving.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the non-zero entries chunk-wise so the on-disk matrix
            # never has to be loaded into memory all at once.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            # fmt '11' flags both vertex and edge weights in the METIS format.
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: number of hyper-edges, then number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One adjacency line per vertex: pairs of (neighbour, weight),
            # optionally preceded by the vertex weight (method 1).
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # One line per hyper-edge: its weight, then its member vertices.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix has served its purpose; free the HDF5 space.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Run METIS or hMETIS and return the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts into which the (hyper-)graph is to be partitioned.
    file_name : string
        Name of the graph file produced by 'wgraph'; it also selects which
        partitioning binary gets invoked ('shmetis' for 'wgraph_HGPA',
        'gpmetis' for 'wgraph_CSPA' and 'wgraph_MCLA').

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of any of three approximation algorithms for
        consensus clustering (either of CSPA, HGPA or MCLA). An empty list
        if 'file_name' is the 'DO_NOT_PROCESS' sentinel.

    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph-file names.
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The partitioners write their result next to the input graph file.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is the UBfactor (allowed partition imbalance) for shmetis.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # np.loadtxt opens 'out_name' itself: the previous version's dead
    # pre-initialization of 'labels' and unused 'open' handle are gone.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list of int
        Starts with 0, then holds for each run the number of distinct,
        valid (finite and non-negative) cluster IDs it contains.
    mutual_info_list : list of float
        Mutual information between each run and 'consensus_labels'.
    consensus_adjacency : compressed sparse row matrix
        Membership indicator matrix of the consensus clustering.
    """
    # A flat label vector is interpreted as a single clustering run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR components of the consensus membership matrix by hand;
    # assumes consensus labels form a dense 0..N_consensus_labels-1 range.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    # Extendable array on disk: one block of rows appended per run.
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Only finite, non-negative entries count as valid cluster labels.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' here is cumulative over all clusters
            # processed so far, so the union for cluster 'c' also includes the
            # members of earlier clusters — confirm this is intended.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Pairwise intersection counts between this run's clusters and the
        # consensus clusters, via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        # Jaccard-style overlap: |A ∩ B| / |A ∪ B|, appended row-block by row-block.
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | cluster_ensembles | python | def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
if hdf5_file_name is None:
hdf5_file_name = './Cluster_Ensembles.h5'
fileh = tables.open_file(hdf5_file_name, 'w')
fileh.create_group(fileh.root, 'consensus_group')
fileh.close()
cluster_ensemble = []
score = np.empty(0)
if cluster_runs.shape[1] > 10000:
consensus_functions = [HGPA, MCLA]
function_names = ['HGPA', 'MCLA']
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"due to a rather large number of cells in your data-set, "
"using only 'HyperGraph Partitioning Algorithm' (HGPA) "
"and 'Meta-CLustering Algorithm' (MCLA) "
"as ensemble consensus functions.\n")
else:
consensus_functions = [CSPA, HGPA, MCLA]
function_names = ['CSPA', 'HGPA', 'MCLA']
hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
for i in range(len(consensus_functions)):
cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
"{0} at {1}.".format(function_names[i], score[i]))
print('*****')
return cluster_ensemble[np.argmax(score)] | Call up to three different functions for heuristic ensemble clustering
(namely CSPA, HGPA and MCLA) then select as the definitive
consensus clustering the one with the highest average mutual information score
between its vector of consensus labels and the vectors of labels associated to each
partition from the ensemble.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are are tagged by an NaN.
hdf5_file_name : file object or string, optional (default = None)
The handle or name of an HDF5 file where any array needed
for consensus_clustering and too large to fit into memory
is to be stored. Created if not specified at input.
verbose : Boolean, optional (default = False)
Specifies if messages concerning the status of the many functions
subsequently called 'cluster_ensembles' will be displayed
on the standard output.
N_clusters_max : int, optional
The number of clusters in which to partition the samples into
a consensus clustering. This defaults to the highest number of clusters
encountered in the sets of independent clusterings on subsamples
of the data-set (i.e. the maximum of the entries in "cluster_runs").
Returns
-------
cluster_ensemble : array of shape (n_samples,)
For the final ensemble clustering, this vector contains the
cluster IDs of each sample in the whole data-set.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L240-L315 | [
"def build_hypergraph_adjacency(cluster_runs):\n \"\"\"Return the adjacency matrix to a hypergraph, in sparse matrix representation.\n\n Parameters\n ----------\n cluster_runs : array of shape (n_partitions, n_samples)\n\n Returns\n -------\n hypergraph_adjacency : compressed sparse row matrix\n Represents the hypergraph associated with an ensemble of partitions,\n each partition corresponding to a row of the array 'cluster_runs'\n provided at input.\n \"\"\"\n\n N_runs = cluster_runs.shape[0]\n\n hypergraph_adjacency = create_membership_matrix(cluster_runs[0])\n for i in range(1, N_runs):\n hypergraph_adjacency = scipy.sparse.vstack([hypergraph_adjacency,\n create_membership_matrix(cluster_runs[i])], \n format = 'csr')\n\n return hypergraph_adjacency\n",
"def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):\n \"\"\"Write an hypergraph adjacency to disk to disk in an HDF5 data structure.\n\n Parameters\n ----------\n hypergraph_adjacency : compressed sparse row matrix\n\n hdf5_file_name : file handle or string\n \"\"\"\n\n assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)\n\n byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes\n FILTERS = get_compression_filter(byte_counts)\n\n with tables.open_file(hdf5_file_name, 'r+') as fileh:\n for par in ('data', 'indices', 'indptr', 'shape'):\n try:\n n = getattr(fileh.root.consensus_group, par)\n n._f_remove()\n except AttributeError:\n pass\n\n array = np.array(getattr(hypergraph_adjacency, par))\n\n atom = tables.Atom.from_dtype(array.dtype)\n ds = fileh.create_carray(fileh.root.consensus_group, par, atom, \n array.shape, filters = FILTERS)\n\n ds[:] = array\n",
"def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):\n \"\"\"Compute a weighted average of the mutual information with the known labels, \n the weights being proportional to the fraction of known labels.\n\n Parameters\n ----------\n cluster_runs : array of shape (n_partitions, n_samples)\n Each row of this matrix is such that the i-th entry corresponds to the\n cluster ID to which the i-th sample of the data-set has been classified\n by this particular clustering. Samples not selected for clustering\n in a given round are are tagged by an NaN.\n\n cluster_ensemble : array of shape (n_samples,), optional (default = None)\n The identity of the cluster to which each sample of the whole data-set \n belong to according to consensus clustering.\n\n verbose : Boolean, optional (default = False)\n Specifies if status messages will be displayed\n on the standard output.\n\n Returns\n -------\n unnamed variable : float\n The weighted average of the mutual information between\n the consensus clustering and the many runs from the ensemble\n of independent clusterings on subsamples of the data-set.\n \"\"\"\n\n if cluster_ensemble is None:\n return 0.0\n\n if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):\n cluster_runs = cluster_runs.reshape(1, -1)\n\n weighted_average_mutual_information = 0\n\n N_labelled_indices = 0\n\n for i in range(cluster_runs.shape[0]):\n labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]\n N = labelled_indices.size\n\n x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)\n y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)\n\n q = normalized_mutual_info_score(x, y)\n\n weighted_average_mutual_information += q * N\n N_labelled_indices += N\n\n return float(weighted_average_mutual_information) / N_labelled_indices\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Report the machine's virtual-memory statistics.

    Returns
    -------
    mem_info : dictionary
        Maps each field of psutil's virtual-memory report (total, free,
        used, etc.) to its current value, cast to an integer.
    """
    return {field: int(value)
            for field, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        Exits the interpreter when less than ~1 GB of memory is free.
    """
    # (free-memory threshold, safety margin) pairs, both in kB, replacing the
    # previous copy-pasted if/elif ladder; the untouched margin grows with the
    # amount of memory available. Tiers must stay sorted in decreasing order.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))
    mem_free = memory()['free']
    for threshold, margin in tiers:
        if mem_free > threshold:
            # 4 bytes per entry, 'n' simultaneous arrays of N * chunk_size entries.
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes the array would occupy uncompressed; must be positive.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in free memory and compression
        is therefore deemed unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # memory()['free'] is in kB, 'byte_counts' in bytes, hence the factor 1000.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fixed: 'tables.Filters' (capitalized) is the PyTables class;
            # 'tables.filters' is a module and calling it raised a TypeError.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except (tables.FiltersWarning, ValueError):
            # PyTables signals an unavailable 'complib' with a ValueError;
            # fall back to LZO when Blosc is not installed.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Vertical stack of the membership indicator matrices of every
        partition in 'cluster_runs'.
    """
    # One indicator matrix per partition, stacked into a single CSR matrix.
    membership_blocks = [create_membership_matrix(run) for run in cluster_runs]
    return scipy.sparse.vstack(membership_blocks, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    total_bytes = (hypergraph_adjacency.data.nbytes
                   + hypergraph_adjacency.indices.nbytes
                   + hypergraph_adjacency.indptr.nbytes)
    FILTERS = get_compression_filter(total_bytes)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        for attribute_name in ('data', 'indices', 'indptr', 'shape'):
            # Drop any node left over from a previous call before re-creating it.
            stale_node = getattr(group, attribute_name, None)
            if stale_node is not None:
                stale_node._f_remove()

            values = np.array(getattr(hypergraph_adjacency, attribute_name))
            atom = tables.Atom.from_dtype(values.dtype)
            carray = fileh.create_carray(group, attribute_name, atom,
                                         values.shape, filters = FILTERS)
            carray[:] = values
def load_hypergraph_adjacency(hdf5_file_name):
    """Reconstruct from disk the hypergraph adjacency matrix previously
    saved by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, par).read()
                      for par in ('data', 'indices', 'indptr', 'shape')]

    data, indices, indptr, shape = components
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Context manager guarantees the handle is closed even if group creation fails
    # (the previous open/close pair leaked the handle on error).
    with tables.open_file(hdf5_file_name, 'w') as fileh:
        fileh.create_group(fileh.root, 'consensus_group')

    cluster_ensemble = []
    score = np.empty(0)
    if cluster_runs.shape[1] > 10000:
        # CSPA builds an n_samples x n_samples similarity matrix, which is
        # prohibitive for large data-sets; restrict to HGPA and MCLA.
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    # Keep the consensus labelling sharing the most information with the ensemble.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds, for one clustering of the ensemble, the cluster ID of
        every sample; samples skipped in that round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The consensus labelling of the whole data-set.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between the consensus
        clustering and the runs from the ensemble of independent clusterings
        on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0

    # A flat label vector is interpreted as a single clustering run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    weighted_sum = 0
    total_labelled = 0
    for run in range(cluster_runs.shape[0]):
        # Only samples actually clustered in this run carry information.
        labelled = np.where(np.isfinite(cluster_runs[run]))[0]
        n_labelled = labelled.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(cluster_runs[run, labelled]), verbose),
                              newshape = n_labelled)

        weighted_sum += normalized_mutual_info_score(consensus_part, run_part) * n_labelled
        total_labelled += n_labelled

    return float(weighted_sum) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If 'cluster_run' is empty, not one-dimensional, or contains an NaN.
    """
    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Fixed: these two messages used to misspell the function name as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            # Shift negative labellings so that the smallest label becomes 0.
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            # Gaps in the labelling: compress it to a dense 0..K mapping.
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        labels are remapped to consecutive integers starting at 0,
        preserving their relative order.
    """
    flat = np.asanyarray(array_in).reshape(-1)

    order = np.argsort(flat)
    sorted_vals = flat[order]

    # Walk the sorted values once, assigning a fresh dense rank each time
    # the value changes.
    dense_ranks = np.empty(flat.size, dtype = int)
    rank = -1
    previous = np.nan
    for position, value in enumerate(sorted_vals):
        if previous != value or np.isnan(previous):
            rank += 1
            previous = value
        dense_ranks[position] = rank

    # Scatter the ranks back to the original positions.
    result = np.empty(flat.size, dtype = int)
    result[order] = dense_ranks
    return result
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Returns
    -------
    similarities : array of shape (n_samples, n_samples)
        The amended similarity matrix: real-valued, square, symmetric,
        with entries in [0, 1] and a unit diagonal. Previously the
        amendments performed by rebinding 'similarities' (real-part
        truncation, square sub-matrix extraction, symmetrization) were
        silently lost because nothing was returned; callers may now use
        the returned matrix. Callers that ignore the return value are
        unaffected.

    Raises
    ------
    ValueError
        If the input matrix is empty or contains any NaN or infinite entry.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Complex entries are truncated to their real components.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # A non-square matrix is reduced to its largest square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries to the valid similarity range [0, 1] (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Self-similarities must all be exactly 1.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")

        return similarities
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        Name or handle of the HDF5 file holding the hypergraph adjacency
        matrix and used as scratch space for the similarity matrix.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; defaults to one
        more than the highest label appearing in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    # 'is None' rather than '== None': identity is the correct test and avoids
    # element-wise __eq__ semantics should an array ever be passed.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # The similarity matrix below is dense (N_samples x N_samples); cap its size.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    # Co-association counts: entry (i, j) is the number of runs in which
    # samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Sanity-check the normalized co-association matrix (return value unused:
    # this call acts as a validation pass on a temporary).
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so the total similarity mass is a fixed constant; METIS works
    # with integer edge weights, hence the rounding below.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round to integers chunk-by-chunk to bound memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        Name or handle of the HDF5 file holding the hypergraph adjacency matrix.

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; defaults to one
        more than the highest label appearing in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # 'is None' rather than '== None': identity is the idiomatic and safe test.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        Name or handle of the HDF5 file holding the hypergraph adjacency
        matrix and used as scratch space for intermediate matrices.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Upper bound on the number of meta-clusters; defaults to one more
        than the highest label appearing in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # 'is None' rather than '== None': identity is the idiomatic and safe test.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge sizes, used later as vertex weights for the meta-graph.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between
    # the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Jaccard(i, j) = |i & j| / (|i| + |j| - |i & j|), computed in
        # row-chunks to bound memory usage, then rescaled and rounded so
        # that METIS receives integer edge weights.
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges.

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                          tables.Float32Atom(), (N_consensus, N_samples),
                          'Matrix of mean memberships, forming meta-clusters',
                          filters = FILTERS)

    # Average the hyper-edges belonging to each meta-cluster into a single
    # meta-hyper-edge (row of 'clb_cum').
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Objects with no association at all get random associations so that
    # they can still be (arbitrarily) assigned to some meta-cluster.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Small random jitter breaks ties when competing for objects below.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column-wise normalization of 'clb_cum' into posterior probabilities.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Each object goes to the meta-cluster holding its maximum association.
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)

        # Fixed a stray leading apostrophe in the message below.
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster labels; non-finite entries (e.g. NaN for samples
        left out of this clustering) are ignored.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_distinct_finite_labels, n_samples); entry (c, i) is 1 if sample i
    carries the c-th smallest finite label.

    Raises
    ------
    ValueError
        If 'cluster_run' is not a vector (one-dimensional up to
        singleton axes).
    """

    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    else:
        cluster_run = cluster_run.reshape(cluster_run.size)

        # Distinct finite labels, in ascending order; one CSR row per label.
        cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

        # Gather the member indices of each cluster once, then assemble the
        # CSR components in a single pass (avoids the quadratic cost of
        # repeatedly growing an array with np.append).
        index_chunks = [np.where(cluster_run == elt)[0] for elt in cluster_ids]
        if index_chunks:
            indices = np.concatenate(index_chunks)
        else:
            indices = np.empty(0, dtype = np.int32)
        indptr = np.cumsum([0] + [chunk.size for chunk in index_chunks])

        data = np.ones(indices.size, dtype = int)

        return scipy.sparse.csr_matrix((data, indices, indptr),
                                       shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Remove the temporary graph file in-process rather than shelling out to
    # 'rm'. Best-effort: 'wgraph' may return the 'DO_NOT_PROCESS' sentinel,
    # which names no file (the previous 'rm' call failed silently there too).
    try:
        os.remove(file_name)
    except OSError:
        pass
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional hyper-edge weights. When provided, a weighted hypergraph
        file is written (wgraph method 3); otherwise the weights are computed
        from the hypergraph itself (method 2).

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    if w is None:
        file_name = wgraph(hdf5_file_name, None, 2)
    else:
        file_name = wgraph(hdf5_file_name, w, 3)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Delete the temporary hypergraph file in-process (portable and cheaper
    # than spawning an 'rm' subprocess); best-effort removal.
    try:
        os.remove(file_name)
    except OSError:
        pass
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Vertex weights for the meta-graph (hyper-edge sizes computed by MCLA).

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name, w, 1)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Delete the temporary graph file in-process (portable and cheaper than
    # spawning an 'rm' subprocess); best-effort removal.
    try:
        os.remove(file_name)
    except OSError:
        pass
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding either the CSPA/MCLA similarity matrices
        (methods 0 and 1) or the hypergraph adjacency matrix (methods 2, 3).

    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).
        NOTE(review): for method 1, 'w *= scale_factor' below mutates the
        caller's array in place — confirm callers do not reuse 'w' afterwards.

    method : int, optional (default = 0)
        0 -- graph file for CSPA; 1 -- vertex-weighted graph file for MCLA;
        2 or 3 -- hypergraph file for HGPA (edge weights computed here for
        method 2, supplied by the caller for method 3).

    Returns
    -------
    file_name : string
        Name of the graph file written in the current working directory, or
        the sentinel 'DO_NOT_PROCESS' if the CSPA similarity matrix has no
        non-zero off-diagonal entry.
    """

    print('\n#')

    # Select the matrix to serialize and the output file name, depending on
    # which consensus heuristics requested the graph.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        # Transposed copy: rows index samples, columns index hyper-edges.
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    if method in {0, 1}:
        # Self-similarities carry no information for graph partitioning;
        # zero out the diagonal before counting and writing edges.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
        if method == 1:
            # METIS expects integer vertex weights: rescale, then round.
            scale_factor = 100.0
            w_sum_before = np.sum(w)
            w *= scale_factor
            w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # Header line. Format flag '1' = edge weights only; '11' = vertex
        # and edge weights (METIS graph-file convention).
        if method == 0:
            # Each undirected edge appears twice in the symmetric matrix.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): this early return leaves 'fileh' open — the
                # HDF5 handle leaks in this branch; confirm whether callers
                # rely on it staying open.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the non-zero entries chunk-by-chunk to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: number of hyper-edges first, then vertex count.
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One line per vertex: [vertex weight (method 1 only),]
            # followed by (neighbour, edge-weight) pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # One line per hyper-edge: weight followed by member vertices.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        # Hyper-edge weight defaults to its membership count.
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    if method in {0, 1}:
        # The similarity matrix has been fully serialized; drop it from HDF5.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of partitions requested from the (hyper-)graph partitioner.

    file_name : string
        Name of the graph file written by 'wgraph' ('wgraph_CSPA',
        'wgraph_MCLA' or 'wgraph_HGPA'), or the 'DO_NOT_PROCESS' sentinel.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list if 'file_name' is the
        'DO_NOT_PROCESS' sentinel.

    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph-file names.
    """

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        # List-form argv with shell=False avoids an intermediate shell and is
        # robust to spaces in 'shmetis_path'; '15' is the UBfactor argument
        # previously baked into the command string.
        subprocess.call([shmetis_path, './' + file_name, k, '15'])
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        subprocess.call(['gpmetis', './' + file_name, k])
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    # Opening 'out_name' doubles as an existence check: it raises if the
    # partitioner failed to produce its output file.
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    labels = one_to_max(labels)

    # Remove the partitioner's output file in-process rather than via 'rm';
    # best-effort, mirroring the silent failure of the subprocess it replaces.
    try:
        os.remove(out_name)
    except OSError:
        pass

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file in which the 'overlap_matrix' extendable array is created
        (under the 'consensus_group' node).

    consensus_labels : array of shape (n_samples,)
        Consensus cluster label for each sample; assumed to take the dense
        integer values 0 .. n_consensus_clusters - 1 (see the range loop
        below).

    cluster_runs : array of shape (n_partitions, n_samples)
        Labellings from the ensemble; NaN marks samples not clustered in a
        given run, and negative labels are excluded as well.

    Returns
    -------
    cluster_dims_list : list
        Starts with 0, then holds the number of distinct valid cluster IDs
        of each run, in order.

    mutual_info_list : list
        One mutual-information score per run (as computed by 'ceEvalMutual')
        against the consensus labelling.

    consensus_adjacency : scipy.sparse.csr_matrix
        Binary membership matrix of the consensus clustering, of shape
        (n_consensus_clusters, n_samples).
    """

    # A one-dimensional 'cluster_runs' denotes a single clustering;
    # promote it to a (1, n_samples) array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build the consensus adjacency in CSR form: row k lists the samples
    # assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # Extendable array: one block of rows appended per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        # Mutual information between this run and the consensus labelling.
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Restrict to samples actually clustered in this run:
        # finite AND non-negative labels.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        # CSR components of this run's own membership matrix, accumulated
        # cluster by cluster.
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                # NOTE(review): 'indices' accumulates across clusters, so the
                # union computed for cluster c also includes the members of
                # clusters preceding it — confirm this is intended rather
                # than a per-cluster union.
                unions[c, k] = np.union1d(indices, x).size
            c += 1

        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # |A ∩ B| via the product of the two binary membership matrices.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        # Jaccard-style overlap: |A ∩ B| / |A ∪ B|, appended as n_ids rows.
        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | ceEvalMutual | python | def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
if cluster_ensemble is None:
return 0.0
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
weighted_average_mutual_information = 0
N_labelled_indices = 0
for i in range(cluster_runs.shape[0]):
labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
N = labelled_indices.size
x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
q = normalized_mutual_info_score(x, y)
weighted_average_mutual_information += q * N
N_labelled_indices += N
return float(weighted_average_mutual_information) / N_labelled_indices | Compute a weighted average of the mutual information with the known labels,
the weights being proportional to the fraction of known labels.
Parameters
----------
cluster_runs : array of shape (n_partitions, n_samples)
Each row of this matrix is such that the i-th entry corresponds to the
cluster ID to which the i-th sample of the data-set has been classified
by this particular clustering. Samples not selected for clustering
in a given round are are tagged by an NaN.
cluster_ensemble : array of shape (n_samples,), optional (default = None)
The identity of the cluster to which each sample of the whole data-set
belong to according to consensus clustering.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
unnamed variable : float
The weighted average of the mutual information between
the consensus clustering and the many runs from the ensemble
of independent clusterings on subsamples of the data-set. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L318-L368 | [
"def checkcl(cluster_run, verbose = False):\n \"\"\"Ensure that a cluster labelling is in a valid format. \n\n Parameters\n ----------\n cluster_run : array of shape (n_samples,)\n A vector of cluster IDs for each of the samples selected for a given\n round of clustering. The samples not selected are labelled with NaN.\n\n verbose : Boolean, optional (default = False)\n Specifies if status messages will be displayed\n on the standard output.\n\n Returns\n -------\n cluster_run : array of shape (n_samples,)\n The input vector is modified in place, such that invalid values are\n either rejected or altered. In particular, the labelling of cluster IDs\n starts at zero and increases by 1 without any gap left.\n \"\"\"\n\n cluster_run = np.asanyarray(cluster_run)\n\n if cluster_run.size == 0:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checkcl: \"\n \"empty vector provided as input.\\n\")\n elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checkl: \"\n \"problem in dimensions of the cluster label vector \"\n \"under consideration.\\n\")\n elif np.where(np.isnan(cluster_run))[0].size != 0:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checkl: vector of cluster \"\n \"labellings provided as input contains at least one 'NaN'.\\n\")\n else:\n min_label = np.amin(cluster_run)\n if min_label < 0:\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checkcl: detected negative values \"\n \"as cluster labellings.\")\n\n cluster_run -= min_label\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checkcl: \"\n \"offset to a minimum value of '0'.\")\n\n x = one_to_max(cluster_run) \n if np.amax(cluster_run) != np.amax(x):\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checkcl: the vector cluster \"\n \"labellings provided is not a dense integer mapping.\")\n\n cluster_run = x\n\n if verbose:\n print(\"INFO: Cluster_Ensembles: checkcl: brought modification \"\n \"to this vector so that its 
labels range \"\n \"from 0 to {0}, included.\\n\".format(np.amax(cluster_run)))\n\n return cluster_run\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
# Standard library
import functools
import gc
import numbers
import operator
import os
import subprocess
import sys
import warnings
from functools import reduce

# Third-party
import numpy as np
import pkg_resources
import psutil
import scipy.sparse
import six
import tables
from six.moves import range
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictonary
        Holds the current values for the total, free and used memory of the system.
    """
    # psutil exposes the virtual-memory snapshot as a named tuple; coerce
    # every field to a plain int keyed by its field name.
    return {field: int(amount)
            for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    mem_free = memory()['free']

    # Pairs of (free-memory threshold, safety margin left unallocated),
    # ordered from the most generous machines down to the smallest ones.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    for threshold, margin in tiers:
        if mem_free > threshold:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    # Below the smallest tier there is not enough headroom to proceed at all.
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes required to store the array under consideration.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        'None' is returned when the array is deemed small enough
        relative to the free memory for compression to be unnecessary.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array is large relative to free memory
    # (the factor of 1000 presumably converts units — TODO confirm).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Bug fix: the PyTables class is 'tables.Filters'; the original
            # 'tables.filters(...)' called the *module* of that name, which
            # raised a TypeError whenever this branch was reached.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on LZO if the Blosc compression library is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    # Stack one binary membership-indicator matrix per partition.
    adjacency = create_membership_matrix(cluster_runs[0])
    for partition in cluster_runs[1:]:
        adjacency = scipy.sparse.vstack([adjacency,
                                         create_membership_matrix(partition)],
                                        format = 'csr')

    return adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Total size of the three buffers backing a CSR matrix; used to decide
    # whether the stored arrays should be compressed.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        # Persist each CSR component ('data', 'indices', 'indptr') plus the
        # matrix 'shape' as its own array under '/consensus_group'.
        for par in ('data', 'indices', 'indptr', 'shape'):
            try:
                # Remove any stale node left over from a previous run.
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                # No pre-existing node of that name: nothing to clean up.
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble from an HDF5 file the sparse hypergraph adjacency matrix
    whose components were stored by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, par).read()
                      for par in ('data', 'indices', 'indptr', 'shape')]

    # The first three arrays are the CSR buffers; the fourth is the shape.
    return scipy.sparse.csr_matrix(tuple(components[:3]), shape = components[3])
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # (Re-)create the HDF5 scratch file holding all large intermediate arrays.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense n_samples x n_samples similarity matrix, which is
    # intractable for large data-sets; skip it in that regime.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # The hypergraph adjacency is shared by all consensus functions; build it
    # once and stage it in the HDF5 file.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run each heuristic, scoring its result by average normalized mutual
    # information against the ensemble of input partitions.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    # Keep the consensus labelling with the best mutual-information score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds the cluster IDs assigned by one clustering run;
        samples skipped by a run are tagged with NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The consensus labelling of the whole data-set.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # Promote a flat vector of labels to a single-row partition matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_mutual_information = 0
    total_labelled = 0

    for row in cluster_runs:
        # Only the samples actually clustered in this run (non-NaN) count.
        known = np.where(np.isfinite(row))[0]
        count = known.size

        consensus_labels = np.reshape(checkcl(cluster_ensemble[known], verbose), newshape = count)
        run_labels = np.reshape(checkcl(np.rint(row[known]), verbose), newshape = count)

        # Weight each run's score by the number of samples it labelled.
        total_mutual_information += normalized_mutual_info_score(consensus_labels, run_labels) * count
        total_labelled += count

    return float(total_mutual_information) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        A sanitized version of the input: labels start at zero and
        increase by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, is not one-dimensional, or contains NaN.
    """

    cluster_run = np.asanyarray(cluster_run)

    # Guard clauses: reject vectors we cannot sanitize.
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    if np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")

    # Shift any negative labelling so the smallest label becomes zero.
    min_label = np.amin(cluster_run)
    if min_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        cluster_run -= min_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")

    # Collapse any gaps in the label range into a dense 0..k-1 mapping.
    dense_relabelling = one_to_max(cluster_run)
    if np.amax(cluster_run) != np.amax(dense_relabelling):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = dense_relabelling
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """

    flat = np.asanyarray(array_in).reshape(-1)

    # 'np.unique' sorts the distinct labels; the 'return_inverse' indices map
    # each entry to the rank of its label among the distinct sorted labels,
    # which is exactly the dense relabelling 0, 1, ..., k-1.
    _, inverse = np.unique(flat, return_inverse = True)

    return inverse.reshape(-1).astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.

    Notes
    -----
    NOTE(review): some repairs below rebind the local name 'similarities'
    (complex truncation, square cropping) and are therefore invisible to the
    caller, while others (value clipping, symmetrization via fancy indexing,
    unit diagonal) mutate the caller's array in place — confirm this mixed
    behavior is intended before relying on the "repaired" matrix.
    """

    # Reject matrices that cannot be repaired at all.
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Complex entries: keep only the real components (local rebind).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Non-square matrix: crop to the largest leading square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries outside [0, 1] (mutates the array in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose when needed.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Self-similarities must all equal one (in-place fix on the diagonal).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the highest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    # Idiom fix: compare to None with 'is', not '=='.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes a dense N_samples x N_samples matrix below.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    # Co-association counts: s[i, j] is the number of runs that put samples
    # i and j in the same cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Sanity-check the normalized co-association matrix.
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so that the total edge weight is a fixed large constant before
    # rounding to the integer weights METIS expects.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round to integers chunk by chunk to bound the memory footprint.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the highest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # Idiom fix: compare to None with 'is', not '=='.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    # All the work is delegated to the hMETIS hypergraph partitioner.
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    The hyper-edges of the ensemble's hypergraph are grouped into
    meta-clusters by graph partitioning on their pairwise Jaccard
    similarities; each sample is then assigned to the meta-cluster
    with which it is most strongly associated.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one more
        than the highest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # Idiom fix: compare to None with 'is', not '=='.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge sizes, used as vertex weights for the partitioner.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # Jaccard(u, v) = |u & v| / (|u| + |v| - |u & v|); for binary rows the
        # intersections are obtained via the Gram matrix of the adjacency.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            # The partitioner wants integer weights, hence the rounding.
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges.

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Average the hyper-edges of each meta-cluster into one membership row.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Samples with no association at all get random ones to break ties.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Add a tiny random perturbation so that arg-max ties are broken randomly.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums, computed chunk-wise, for the normalization below.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    # Normalize each column so the association scores behave like posteriors.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    # Winner-takes-all: each sample goes to the meta-cluster attaining its
    # maximal (perturbed, normalized) association score.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    An adjacency matrix in compressed sparse row form, with one row per
    distinct finite cluster ID (NaN-tagged samples belong to no row).
    """

    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")

    cluster_run = cluster_run.reshape(cluster_run.size)
    # Distinct finite labels, sorted; NaN entries are simply skipped.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

    # Gather, per cluster, the column indices of its members; the running
    # totals in 'counts' become the CSR row pointer.
    member_lists = []
    counts = [0]
    for cluster_id in cluster_ids:
        members = np.where(cluster_run == cluster_id)[0]
        member_lists.append(members)
        counts.append(counts[-1] + members.size)

    if member_lists:
        indices = np.concatenate(member_lists)
    else:
        indices = np.empty(0, dtype = np.int32)
    indptr = np.asarray(counts)
    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Dump the similarity graph to a METIS-format file, partition it,
    # then remove the temporary graph file.
    graph_file = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, graph_file)
    subprocess.call(['rm', graph_file])

    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional hyper-edge weights; when absent an unweighted
        hypergraph file is written instead.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    # Method code 2 writes an unweighted hypergraph file, 3 a weighted one.
    if w is None:
        graph_file = wgraph(hdf5_file_name, None, 2)
    else:
        graph_file = wgraph(hdf5_file_name, w, 3)

    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])

    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Method code 1 writes the weighted meta-clustering graph for gpmetis.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])

    return partition
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)
        Vertex (or hyper-edge) weights, used by methods 1 and 3.

    method : int, optional (default = 0)
        0 -> CSPA similarity graph; 1 -> MCLA weighted similarity graph;
        2 and 3 -> HGPA hypergraph (unweighted and weighted, respectively).

    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA graph has no edges.
    """

    print('\n#')

    # Select the source matrix and output file name for the chosen method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        # Transposed so that rows index samples and columns index hyper-edges.
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    if method in {0, 1}:
        # Zero the diagonal: self-similarities are not graph edges.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0

    if method == 1:
        # Rescale and round the vertex weights to integers for gpmetis.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # Header line: vertex/edge counts plus the METIS/hMETIS format flag.
        if method == 0:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the non-zero entries chunk-wise to bound memory use.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One line per vertex: optional vertex weight (method 1), then
            # alternating neighbour index / edge weight pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # Hypergraph format: one line per hyper-edge, listing its weight
            # followed by the vertices it spans.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    if method in {0, 1}:
        # The similarity matrix is no longer needed once the file is written.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Run METIS or hMETIS and return the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of partitions requested from the graph partitioner.

    file_name : string
        Name of the graph file to partition, as written by 'wgraph'; must be
        one of 'wgraph_CSPA', 'wgraph_HGPA' or 'wgraph_MCLA', or the sentinel
        'DO_NOT_PROCESS'.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of any of three approximation algorithms for
        consensus clustering (either of CSPA, HGPA or MCLA).
    """
    # 'wgraph' flags degenerate inputs with this sentinel; nothing to partition.
    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        # Pass the command as an argument list (no shell): safer than
        # interpolating file names into a shell string. '15' is the UBfactor
        # (load-imbalance tolerance) positional argument expected by shmetis.
        subprocess.call([shmetis_path, './' + file_name, k, '15'])
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        subprocess.call(['gpmetis', './' + file_name, k])
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # The partitioner writes one integer cluster ID per line.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    # Remap to a dense 0..K-1 labelling.
    labels = one_to_max(labels)

    subprocess.call(['rm', out_name])

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file hosting the 'consensus_group' node; an extendable
        'overlap_matrix' array is created therein.

    consensus_labels : array of shape (n_samples,)
        Labels from the consensus clustering; assumed dense in 0..K-1.

    cluster_runs : array of shape (n_partitions, n_samples)
        Ensemble of clusterings; entries may be NaN for unsampled points.

    Returns
    -------
    cluster_dims_list : list
        Starts with 0, then holds the number of distinct valid cluster IDs
        in each run.

    mutual_info_list : list
        Mutual information between each run and the consensus labelling.

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of shape (n_consensus_clusters, n_samples).
    """
    # A flat vector was passed as the ensemble: promote to a one-run matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build the consensus membership matrix in raw CSR form: row k lists the
    # samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # NOTE: this local name shadows the enclosing function's name.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only finite, non-negative labels: NaN marks unsampled points.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])

        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        # unions[c, k]: size of the union of run-cluster c with consensus cluster k.
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]

        c = 0
        for elt in cluster_ids:
            # NOTE(review): 'indices' accumulates across all clusters seen so
            # far in this run, so each union is computed against the cumulative
            # index set rather than the current cluster alone — confirm this
            # is intended before relying on these overlap values.
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)

            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size

            c += 1

        data = np.ones(indices.size, dtype = int)

        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # Intersection sizes via a sparse matrix product; overlap = inter / union.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | checkcl | python | def checkcl(cluster_run, verbose = False):
cluster_run = np.asanyarray(cluster_run)
if cluster_run.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
"empty vector provided as input.\n")
elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
"problem in dimensions of the cluster label vector "
"under consideration.\n")
elif np.where(np.isnan(cluster_run))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
"labellings provided as input contains at least one 'NaN'.\n")
else:
min_label = np.amin(cluster_run)
if min_label < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
"as cluster labellings.")
cluster_run -= min_label
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: "
"offset to a minimum value of '0'.")
x = one_to_max(cluster_run)
if np.amax(cluster_run) != np.amax(x):
if verbose:
print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
"labellings provided is not a dense integer mapping.")
cluster_run = x
if verbose:
print("INFO: Cluster_Ensembles: checkcl: brought modification "
"to this vector so that its labels range "
"from 0 to {0}, included.\n".format(np.amax(cluster_run)))
return cluster_run | Ensure that a cluster labelling is in a valid format.
Parameters
----------
cluster_run : array of shape (n_samples,)
A vector of cluster IDs for each of the samples selected for a given
round of clustering. The samples not selected are labelled with NaN.
verbose : Boolean, optional (default = False)
Specifies if status messages will be displayed
on the standard output.
Returns
-------
cluster_run : array of shape (n_samples,)
The input vector is modified in place, such that invalid values are
either rejected or altered. In particular, the labelling of cluster IDs
starts at zero and increases by 1 without any gap left. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L371-L430 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Report the machine's virtual-memory statistics.

    Returns
    -------
    mem_info : dict
        Maps each field reported by 'psutil.virtual_memory()' (such as
        'total', 'free' and 'used') to its value cast to an integer.
    """
    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(amount) for field, amount in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit
        in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    mem_free = memory()['free']

    # (threshold, reserve) pairs, ordered from the most to the least free
    # memory: above each threshold, keep 'reserve' kB aside and devote the
    # rest to the chunk (4 bytes per float32 element, 'n' simultaneous
    # arrays of N elements per row).
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )

    for threshold, reserve in tiers:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Positive size, in bytes, of the array about to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in free memory and compression
        is therefore skipped.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array is large relative to free memory
    # (memory()['free'] is expressed in kB, hence the factor of 1000).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fixed: the PyTables filters class is 'tables.Filters';
            # lowercase 'tables.filters' names a module and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if Blosc is unavailable on this installation.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    N_runs = cluster_runs.shape[0]

    # Stack all membership matrices in a single call rather than vstack-ing
    # incrementally inside a loop, which rebuilds the accumulated matrix at
    # every iteration (accidentally quadratic in the number of runs).
    return scipy.sparse.vstack(
        [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)],
        format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.

    The CSR matrix is decomposed into its 'data', 'indices', 'indptr' and
    'shape' components, each stored as a separate compressed array under the
    file's 'consensus_group' node, replacing any previous copy.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
        Must already contain a '/consensus_group' node.
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale node of the same name before re-creating it;
            # AttributeError means no such node existed yet.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Rebuild the hypergraph adjacency matrix previously written to disk
    by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    components = []
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for attribute in ('data', 'indices', 'indptr', 'shape'):
            components.append(getattr(fileh.root.consensus_group, attribute).read())

    data, indices, indptr, shape = components
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Opened in 'w' mode: any pre-existing file of the same name is truncated.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense n_samples x n_samples similarity matrix, which is
    # impractical past ~10000 samples; skip it for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # The shared hypergraph adjacency is computed once and persisted so each
    # consensus function can reload it from the HDF5 file.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))

    print('*****')

    # Keep the consensus with the highest average mutual information score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0

    # A flat vector was passed as the ensemble: promote to a one-run matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    weighted_average_mutual_information = 0

    N_labelled_indices = 0

    for i in range(cluster_runs.shape[0]):
        # Only samples actually clustered in this run (finite labels) count.
        labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
        N = labelled_indices.size

        # Normalize both labellings to dense 0..K-1 vectors before scoring.
        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)

        q = normalized_mutual_info_score(x, y)

        # Weight each run's score by how many samples it labelled.
        weighted_average_mutual_information += q * N
        N_labelled_indices += N

    return float(weighted_average_mutual_information) / N_labelled_indices
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the input is empty, not effectively one-dimensional, or
        contains NaN entries.
    """
    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Fixed error messages below: they previously referred to a
        # nonexistent 'checkl' function.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        # Shift labels so the smallest one becomes 0.
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")

            cluster_run -= min_label

            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        # Densify the labelling (no gaps between consecutive cluster IDs).
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")

            cluster_run = x

            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        the same grouping, relabelled with consecutive integers starting
        at 0, assigned in increasing order of the original labels.
    """
    flat = np.asanyarray(array_in).reshape(-1)
    n = flat.size

    # Rank values by sorting, then walk the sorted sequence and bump the
    # dense label each time the value changes.
    order = np.argsort(flat)
    sorted_values = flat[order]

    dense = np.empty(n, dtype = int)
    previous = np.nan
    label = -1
    for position in range(n):
        value = sorted_values[position]
        # NaN never compares equal, so any NaN entries each get a new label.
        if previous != value or np.isnan(previous):
            previous = value
            label += 1
        dense[position] = label

    # Scatter the dense labels back to the original ordering.
    result = np.empty(n, dtype = int)
    result[order] = dense

    return result
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Entries are validated (non-empty, no NaN, no infinities) and some
    violations are repaired: out-of-range entries are clipped into [0, 1]
    in place, the matrix is symmetrized and the diagonal set to 1.

    NOTE(review): the repairs done by plain re-assignment ('similarities =
    similarities.real' and the square-sub-matrix truncation) rebind only the
    local name, so those two corrections are NOT visible to the caller; the
    function returns None. Confirm whether this is intended.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty, or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Complex entries: keep only the real components (local effect only).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")

            similarities = similarities.real

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Non-square input: truncate to the largest leading square sub-matrix
        # (local effect only).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")

            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clip entries into the valid similarity range [0, 1] (in place).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")

            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose (local rebinding).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")

            similarities = np.divide(similarities + np.transpose(similarities), 2.0)

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Self-similarities must equal 1 (in-place diagonal fix).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")

            similarities[np.diag_indices(similarities.shape[0])] = 1

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the pre-computed hypergraph adjacency under
        its 'consensus_group' node.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Raises
    ------
    ValueError
        If the data-set holds more than 20000 samples (the dense
        co-association matrix would be too large).

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Co-association matrix: s[i, j] counts the runs clustering i and j together.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Validate the normalized co-association matrix as a similarity matrix.
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so the total edge weight is a fixed large integer budget, as
    # expected by the integer-weighted METIS graph format.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round the scaled similarities to integers, chunk by chunk.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    if N_clusters_max is None:
        N_clusters_max = 1 + int(np.nanmax(cluster_runs))

    # Delegate the actual hypergraph partitioning to hMETIS.
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Clusters the hyper-edges of the ensemble's hypergraph into meta-clusters
    (via a Jaccard-similarity graph partitioned by 'cmetis'), collapses each
    meta-cluster into a mean membership vector, then assigns every sample to
    its most associated meta-cluster.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the pre-computed hypergraph adjacency; scratch
        arrays are created and removed under its 'consensus_group' node.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # One weight per hyper-edge: the size of the corresponding cluster.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                               'similarities_MCLA', tables.Float32Atom(),
                               (N_rows, N_rows), "Matrix of pairwise Jaccard "
                               "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # For binary rows a and b: |a ∩ b| = a·b and
        # |a ∪ b| = |a| + |b| - a·b, hence Jaccard = a·b / (|a| + |b| - a·b).
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            # Integer weights, as required by the METIS graph format.
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # NOTE(review): 'cmetis' is defined elsewhere in this module; presumably it
    # partitions the Jaccard-similarity graph with vertex weights 'w' — confirm.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    # clb_cum[c, s]: mean membership of sample s over the hyper-edges
    # belonging to meta-cluster c.
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)

        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))

        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Find samples with zero association to every meta-cluster.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Unassociated samples get random associations so they still end up
    # in some meta-cluster.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Small random jitter breaks ties between equally-associated meta-clusters.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums used to normalize each sample's association vector.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    # Normalize columns and record each sample's strongest association.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Assign each sample to the meta-cluster achieving its maximal
    # association; iterating in reverse means the lowest matching index wins.
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary membership indicator matrix for one labelling.

    Each row of the returned matrix corresponds to one cluster ID and
    flags with a 1 the samples assigned to that cluster. Stacking such
    matrices contributes to the adjacency matrix of the hypergraph
    representation of an ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    An adjacency matrix in compressed sparse row form.
    """
    cluster_run = np.asanyarray(cluster_run)
    # The labelling must be effectively one-dimensional (an (N,), (N, 1)
    # or (1, N) array); anything genuinely two-dimensional is rejected.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Only finite labels define clusters; NaN marks unselected samples.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Gather, per cluster ID, the positions of its member samples.
    row_members = [np.where(cluster_run == cluster_id)[0]
                   for cluster_id in cluster_ids]
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for members in row_members:
        indices = np.append(indices, members)
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """Partition with METIS the induced similarity graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    partition_labels = sgraph(N_clusters_max, graph_file)
    # Discard the intermediate graph file once partitioning is done.
    subprocess.call(['rm', graph_file])
    return partition_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 writes an unweighted hypergraph; 3 a weighted one.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for the meta-graph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional vertex weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 selects the weighted MCLA meta-graph format.
    graph_file = wgraph(hdf5_file_name, w, 1)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Depending on 'method', the structure serialized to disk is the CSPA
    similarity graph (0), the MCLA meta-graph (1), or the hypergraph used
    by HGPA (2 for edge weights derived from column sums, 3 for the
    weights supplied in 'w').

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the matrices previously written under
        '/consensus_group'.
    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).
    method : int, optional (default = 0)
        Selects which matrix to serialize and in which (h)METIS format.

    Returns
    -------
    file_name : string
        Name of the graph file written to the current working directory;
        'DO_NOT_PROCESS' when the CSPA similarity graph has no edge.
    """
    print('\n#')
    # Select the matrix to serialize and the output file name.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        # HGPA works on the transposed hypergraph adjacency: one column
        # per hyper-edge.
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # The graph formats ignore self-loops: zero out the diagonal.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # The vertex weights are rescaled and rounded, since the file
        # format only carries integer weights.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # Header: vertex count, edge count; trailing '1' flags the
            # presence of edge weights.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # Empty graph: tell the caller to skip partitioning.
                # NOTE(review): returning here leaves 'fileh' open -- confirm
                # whether intentional.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the edges chunk-wise so the full matrix never has to
            # be materialized in memory; trailing '11' flags both vertex
            # and edge weights.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: number of hyper-edges, then number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: [vertex weight (method 1 only), then]
            # alternating neighbour IDs and edge weights.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One line per hyper-edge: its weight followed by the IDs of
            # its member vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The serialized similarity matrix is no longer needed on disk.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    The external binaries are invoked through the shell: 'gpmetis' is
    assumed to be on the PATH, while 'shmetis' is shipped with this
    package for Linux and OS X only.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts into which to partition the (hyper-)graph.
    file_name : string
        Name of the graph file written by 'wgraph'; it determines which
        partitioning program gets invoked.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list when 'wgraph'
        flagged an edge-less graph with 'DO_NOT_PROCESS'.
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # Both partitioners write their result to '<graph file>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # The trailing '15' is shmetis' UBfactor (allowed partition
        # imbalance, in percent) -- see the hMETIS manual.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    # Load the one-label-per-line partition file produced above.
    labels = np.empty(0, dtype = int)
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Compress the partition IDs into a dense 0..k-1 mapping.
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).

    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file with a '/consensus_group' node; the overlap matrix is
        appended there as an extendable array named 'overlap_matrix'.
    consensus_labels : array of shape (n_samples,)
        Labels produced by consensus clustering; the loop below assumes
        they form a dense mapping starting at 0 -- TODO confirm for all
        callers.
    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering of the samples per row; NaN marks samples not
        selected for a given run.

    Returns
    -------
    cluster_dims_list : list
        0 followed by the number of distinct cluster IDs of each run.
    mutual_info_list : list
        Mutual information between each run and 'consensus_labels'.
    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of the consensus clustering.
    """
    # A one-dimensional ensemble is treated as a single run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR ingredients of the consensus membership matrix:
    # row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only samples with a finite, non-negative label in this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        # unions[c, k]: size of the union of the samples accumulated up to
        # run-cluster c with those of consensus cluster k.
        # NOTE(review): 'indices' accumulates the members of every cluster
        # ID processed so far, so each row of 'unions' is computed against
        # the cumulative set rather than that cluster alone -- confirm
        # this is intended.
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # intersections[c, k]: number of samples shared by run-cluster c
        # and consensus cluster k.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        # Jaccard-style ratio |intersection| / |union|, appended block by
        # block to the on-disk extendable array.
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | one_to_max | python | def one_to_max(array_in):
x = np.asanyarray(array_in)
N_in = x.size
array_in = x.reshape(N_in)
sorted_array = np.sort(array_in)
sorting_indices = np.argsort(array_in)
last = np.nan
current_index = -1
for i in range(N_in):
if last != sorted_array[i] or np.isnan(last):
last = sorted_array[i]
current_index += 1
sorted_array[i] = current_index
result = np.empty(N_in, dtype = int)
result[sorting_indices] = sorted_array
return result | Alter a vector of cluster labels to a dense mapping.
Given that this function is herein always called after passing
a vector to the function checkcl, one_to_max relies on the assumption
that cluster_run does not contain any NaN entries.
Parameters
----------
array_in : a list or one-dimensional array
The list of cluster IDs to be processed.
Returns
-------
result : one-dimensional array
A massaged version of the input vector of cluster identities. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L433-L469 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dict
        Maps each field reported by psutil.virtual_memory() (e.g. 'total',
        'free', 'used') to its value cast to an integer.
    """
    return {key: int(value)
            for key, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit
        in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    # (threshold, margin) pairs in the units reported by memory()['free']:
    # the more memory is available, the larger the amount held back
    # before estimating the chunk size.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))
    mem_free = memory()['free']
    for threshold, margin in tiers:
        if mem_free > threshold:
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.

    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Expected size in bytes of the array to be stored; must be positive.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is deemed small enough relative to free memory
        for compression to be unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # Compress only when the array would claim a substantial fraction of
    # the free memory (the factor of 1000 presumably converts between the
    # units of 'byte_counts' and memory()['free'] -- TODO confirm).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fixed: the PyTables filter class is 'tables.Filters'; the
            # previous code called the 'tables.filters' module, which is
            # not callable and raised a TypeError whenever this branch ran.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on LZO; only triggered when warnings are escalated
            # to errors -- NOTE(review): confirm this fallback is reachable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering of the samples per row.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    N_runs = cluster_runs.shape[0]
    # Stack all membership matrices in a single call: repeatedly invoking
    # scipy.sparse.vstack inside a loop copies the accumulated matrix at
    # every iteration, which is quadratic in the number of runs.
    hypergraph_adjacency = scipy.sparse.vstack(
        [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)],
        format = 'csr')
    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    The CSR matrix is decomposed into its 'data', 'indices', 'indptr' and
    'shape' attributes, each stored as a separate compressed array under
    the '/consensus_group' node; 'load_hypergraph_adjacency' performs the
    inverse operation.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Overwrite any stale array left over from a previous call.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble from disk the hypergraph adjacency matrix written by
    'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        # Read back the four CSR ingredients in the order they were stored.
        pars = [getattr(fileh.root.consensus_group, attribute).read()
                for attribute in ('data', 'indices', 'indptr', 'shape')]
    data, indices, indptr, shape = pars
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Start from a fresh HDF5 file holding the group that every consensus
    # function writes its intermediate arrays to.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    if cluster_runs.shape[1] > 10000:
        # CSPA requires a dense n_samples x n_samples similarity matrix
        # and is therefore skipped on large data-sets.
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # All three heuristics share the same hypergraph representation of the
    # ensemble, so it is built and persisted once.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its average mutual information
        # with the runs of the ensemble.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
    print('*****')
    # Keep the candidate sharing the most mutual information with the ensemble.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
        0.0 when no consensus labelling is provided.
    """
    if cluster_ensemble is None:
        return 0.0
    # A one-dimensional input stands for a single clustering.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    weighted_average_mutual_information = 0
    N_labelled_indices = 0
    for i in range(cluster_runs.shape[0]):
        # Restrict the comparison to the samples actually clustered in run i
        # (unselected samples carry a NaN label).
        labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
        N = labelled_indices.size
        # Normalize both labellings to dense integer mappings before scoring.
        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
        q = normalized_mutual_info_score(x, y)
        # Weight each run's score by its number of labelled samples.
        weighted_average_mutual_information += q * N
        N_labelled_indices += N
    return float(weighted_average_mutual_information) / N_labelled_indices
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, not effectively one-dimensional,
        or contains any NaN entry.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Fixed: this message and the next one used to misreport their
        # origin as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift any negative labelling so that the minimum label is 0.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Compress the labels into a dense 0..k-1 mapping if gaps are present.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Labels are remapped, in increasing order of their values, onto the
    consecutive integers 0, 1, 2, ... without gaps. Given that this
    function is herein always called after passing a vector to the
    function checkcl, it relies on the assumption that the input does not
    contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    flat = np.asanyarray(array_in)
    n = flat.size
    flat = flat.reshape(n)
    # Process the labels in sorted order; 'order' remembers where each
    # sorted value came from so the dense labels can be scattered back.
    order = np.argsort(flat)
    sorted_vals = flat[order]
    dense = np.empty(n, dtype = int)
    label = -1
    previous = np.nan
    for pos in range(n):
        value = sorted_vals[pos]
        # A new distinct value opens the next dense label.
        if previous != value or np.isnan(previous):
            previous = value
            label += 1
        dense[order[pos]] = label
    return dense
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Entries are validated (non-empty, free of NaN and infinite values)
    and, where possible, repaired: complex entries are truncated to their
    real parts, non-square matrices are cut down to their largest square
    sub-matrix, out-of-range entries are clipped to [0, 1], the matrix is
    symmetrized and its diagonal set to 1.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Returns
    -------
    similarities : array of shape (n_samples, n_samples)
        The validated and possibly repaired matrix. The truncation to real
        parts, the square sub-matrix extraction and the symmetrization
        rebind the local variable rather than modify the input in place;
        callers interested in those repairs must use the returned array
        (previously this function returned None and those repairs were
        silently lost).

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            # Clipping happens in place on the (possibly rebound) array.
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
        # Return the repaired matrix so that the rebinding repairs above
        # are not lost to the caller.
        return similarities
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file where the hypergraph adjacency matrix has been stored and
        where the CSPA similarity matrix will be written.
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to one
        more than the highest cluster ID occurring in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    if N_samples > 20000:
        # The co-association matrix below is dense and N_samples x N_samples.
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    # s[i, j] counts in how many runs samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate the co-association frequencies as a similarity matrix.
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the entries become integer-friendly before rounding below.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        # Evaluate the scaling out-of-core, straight into the on-disk array.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round the scaled similarities chunk by chunk to limit memory use.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the hypergraph adjacency matrix.

    cluster_runs : array of shape (n_partitions, n_samples)
        Each row records the cluster IDs assigned by one partition.

    verbose : bool, optional (default = False)
        Currently unused by this consensus function; kept for a uniform
        signature with CSPA and MCLA.

    N_clusters_max : int, optional (default = None)
        Number of clusters for the consensus partition; defaults to the
        largest label found in 'cluster_runs' plus one.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # 'is None' rather than '== None': identity test is the correct idiom.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store whose 'consensus_group' node holds the hypergraph
        adjacency matrix.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to the largest label found in 'cluster_runs' plus one.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # One hyper-edge per row; w holds the size of each hyper-edge.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between
    # the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)

        # Similarities are scaled to integers for METIS edge weights.
        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # |A ∩ B| for every pair of hyper-edges, plus each hyper-edge's size.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            # Jaccard = |A ∩ B| / (|A| + |B| - |A ∩ B|), computed blockwise.
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges.

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Row r of clb_cum = mean membership vector of the hyper-edges assigned
    # to meta-cluster r.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    if null_columns.size != 0:
        # Samples with no association at all get random associations so the
        # argmax below is well-defined.
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Tiny random perturbation of clb_cum so ties in the argmax competition
    # below are broken at random.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums of the perturbed matrix, used to normalise each column.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    # Winner-takes-all: each sample joins the meta-cluster with the highest
    # (normalised) association; the winning value is kept as a probability.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary membership indicator matrix of a label vector.

    Each row of the result corresponds to one distinct (finite) cluster ID
    encountered in 'cluster_run' and flags the samples belonging to it.
    Concatenating such matrices over an ensemble of clusterings yields the
    adjacency matrix of the associated hypergraph.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        Vector of cluster labels; NaN entries mark unclustered samples.

    Returns
    -------
    Membership indicator matrix in compressed sparse row format,
    of shape (n_distinct_labels, n_samples).
    """

    cluster_run = np.asanyarray(cluster_run)

    # Only vectors are accepted (possibly carrying singleton dimensions).
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")

    cluster_run = cluster_run.reshape(cluster_run.size)

    # One row per distinct finite label; NaN-tagged samples contribute nowhere.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for label in cluster_ids:
        indices = np.append(indices, np.where(cluster_run == label)[0])
        indptr = np.append(indptr, indices.size)

    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the CSPA similarity matrix.

    N_clusters_max : int
        Number of parts requested from the graph partitioner.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Portable clean-up of the temporary graph file: 'os.remove' instead of
    # shelling out to 'rm'. wgraph may return the sentinel 'DO_NOT_PROCESS'
    # (no file written), hence the existence guard.
    if os.path.exists(file_name):
        os.remove(file_name)
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix.

    N_clusters_max : int
        Number of parts requested from the hypergraph partitioner.

    w : array, optional (default = None)
        Hyper-edge weights; when omitted, an unweighted hypergraph is written.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    if w is None:
        file_name = wgraph(hdf5_file_name, None, 2)
    else:
        file_name = wgraph(hdf5_file_name, w, 3)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Portable clean-up of the temporary graph file: 'os.remove' instead of
    # shelling out to 'rm'; guard against the 'DO_NOT_PROCESS' sentinel.
    if os.path.exists(file_name):
        os.remove(file_name)
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the MCLA meta-graph similarity matrix.

    N_clusters_max : int
        Number of parts requested from the graph partitioner.

    w : array, optional (default = None)
        Vertex weights written alongside the meta-graph.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name, w, 1)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Portable clean-up of the temporary graph file: 'os.remove' instead of
    # shelling out to 'rm'; guard against the 'DO_NOT_PROCESS' sentinel.
    if os.path.exists(file_name):
        os.remove(file_name)
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store whose 'consensus_group' node holds the matrix to dump
        ('similarities_CSPA', 'similarities_MCLA' or the raw hypergraph).

    w : list or array, optional (default = None)
        Vertex weights (methods 1 and 3); ignored for the other methods.

    method : int, optional (default = 0)
        0 -> CSPA similarity graph, 1 -> MCLA weighted meta-graph,
        2/3 -> HGPA hypergraph (3 carries explicit hyper-edge weights).

    Returns
    -------
    file_name : string
        Name of the graph file written to the working directory, or the
        sentinel string 'DO_NOT_PROCESS' when the CSPA graph has no edge.
    """

    print('\n#')

    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    if method in {0, 1}:
        # Self-similarities must not appear as edges in the METIS file.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
        if method == 1:
            # Vertex weights are scaled and rounded to integers for METIS.
            scale_factor = 100.0
            # NOTE(review): 'w_sum_before' is computed but never used —
            # looks like dead code; confirm before removing.
            w_sum_before = np.sum(w)
            w *= scale_factor
            w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # Header line: vertex/edge counts plus the METIS/hMETIS format flag
        # ('1' = edge weights, '11' = vertex and edge weights).
        if method == 0:
            # Each undirected edge is counted once, hence the division by 2.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # Empty graph: signal the caller to skip partitioning.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count non-zero entries in memory-sized chunks.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: number of hyper-edges then number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One line per vertex: optional weight, then (neighbour, weight)
            # pairs; processed in chunks to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]

                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights

                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))

            # One line per hyper-edge: weight, then member vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    if method in {0, 1}:
        # The similarity matrix has served its purpose; free the disk space.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the partitioner.

    file_name : string
        Name of the graph file produced by 'wgraph'; the partitioner to call
        is inferred from it. The sentinel 'DO_NOT_PROCESS' short-circuits
        to an empty result.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    """

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    # METIS/hMETIS write their result next to the input graph file.
    out_name = file_name + '.part.' + k

    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        # NOTE(review): the command string is built from internal values only,
        # but 'shell = True' with string concatenation is fragile; a list
        # argument with shell = False would be safer.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # np.loadtxt opens the file itself: the redundant 'open' and the dead
    # 'labels = np.empty(0)' initialisation of the original were dropped.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)

    labels = one_to_max(labels)

    # Portable removal of the partition file ('os.remove' instead of 'rm').
    if os.path.exists(out_name):
        os.remove(out_name)

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store; the overlap matrix is appended to its 'consensus_group'.

    consensus_labels : array of shape (n_samples,)
        Consensus cluster ID of each sample.

    cluster_runs : array of shape (n_partitions, n_samples)
        Label vectors of the individual runs of the ensemble.

    Returns
    -------
    cluster_dims_list : list
        0 followed by the number of distinct valid labels in each run.

    mutual_info_list : list
        Mutual information of each run with the consensus labelling.

    consensus_adjacency : compressed sparse row matrix
        Membership indicator matrix of the consensus clustering.
    """

    # A vector possibly stored with singleton dimensions counts as one run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build the CSR components of the consensus membership matrix:
    # row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only samples with a finite, non-negative label for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)

        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        # CSR components of this run's membership matrix, built label by label.
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                # NOTE(review): 'indices' accumulates the samples of ALL labels
                # processed so far, so this union is taken against the running
                # cumulative set rather than the current cluster alone —
                # confirm this is the intended Jaccard denominator.
                unions[c, k] = np.union1d(indices, x).size
            c += 1

        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # Pairwise intersection sizes between this run's clusters and the
        # consensus clusters, via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | checks | python | def checks(similarities, verbose = False):
if similarities.size == 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
"matrix provided as input happens to be empty.\n")
elif np.where(np.isnan(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
"matrix contains at least one 'NaN'.\n")
elif np.where(np.isinf(similarities))[0].size != 0:
raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
"detected in input similarities matrix.\n")
else:
if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: complex entries found "
"in the similarities matrix.")
similarities = similarities.real
if verbose:
print("\nINFO: Cluster_Ensembles: checks: "
"truncated to their real components.")
if similarities.shape[0] != similarities.shape[1]:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
N_square = min(similarities.shape)
similarities = similarities[:N_square, :N_square]
if verbose:
print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
max_sim = np.amax(similarities)
min_sim = np.amin(similarities)
if max_sim > 1 or min_sim < 0:
if verbose:
print("\nINFO: Cluster_Ensembles: checks: strictly negative "
"or bigger than unity entries spotted in input similarities matrix.")
indices_too_big = np.where(similarities > 1)
indices_negative = np.where(similarities < 0)
similarities[indices_too_big] = 1.0
similarities[indices_negative] = 0.0
if verbose:
print("\nINFO: Cluster_Ensembles: checks: done setting them to "
"the lower or upper accepted values.")
if not np.allclose(similarities, np.transpose(similarities)):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
"similarities matrix.")
similarities = np.divide(similarities + np.transpose(similarities), 2.0)
if verbose:
print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
if verbose:
print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
"provided as input are not all of unit value.")
similarities[np.diag_indices(similarities.shape[0])] = 1
if verbose:
print("\nINFO: Cluster_Ensembles: checks: issue corrected.") | Check that a matrix is a proper similarity matrix and bring
appropriate changes if applicable.
Parameters
----------
similarities : array of shape (n_samples, n_samples)
A matrix of pairwise similarities between (sub)-samples of the data-set.
verbose : Boolean, optional (default = False)
Alerts of any issue with the similarities matrix provided
and of any step possibly taken to remediate such problem. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L472-L551 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
# Standard library.
import functools
import gc
import numbers
import operator
import os
import subprocess
import sys
import warnings
from functools import reduce

# Third-party dependencies.
import numpy as np
import pkg_resources
import psutil
import scipy.sparse
import six
import tables
from six.moves import range
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
# Silence NumPy's invalid-value warnings (NaN comparisons occur routinely on
# partially-labelled runs) and deprecation noise from dependencies.
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)

# Public API of this module.
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
           'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """

    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(value) for field, value in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    mem_free = memory()['free']

    # (free-memory threshold, safety margin held back), both in kilobytes;
    # entries of the arrays are assumed to take 4 bytes each.
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )
    for threshold, margin in tiers:
        if mem_free > threshold:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Estimated size in bytes of the array about to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the data fits comfortably in free memory and compression
        would be superfluous.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # memory()['free'] is expressed in kilobytes, hence the factor of 1000.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fix: 'tables.Filters' (capital F) is the class; 'tables.filters'
            # is a module and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if blosc is unavailable in this PyTables build.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row records the cluster IDs assigned by one partition.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Stack all membership matrices in a single call: the original pairwise
    # vstack inside a loop re-copied the accumulated matrix at every step,
    # i.e. quadratic work in the number of partitions.
    blocks = [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)]
    hypergraph_adjacency = scipy.sparse.vstack(blocks, format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk to disk in an HDF5 data structure.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
        Sparse matrix whose CSR components are persisted one by one.

    hdf5_file_name : file handle or string
        HDF5 store; components land in its 'consensus_group' node.
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    byte_counts = (hypergraph_adjacency.data.nbytes
                   + hypergraph_adjacency.indices.nbytes
                   + hypergraph_adjacency.indptr.nbytes)
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for attribute in ('data', 'indices', 'indptr', 'shape'):
            # Drop any stale copy of this CSR component before re-creating it.
            try:
                node = getattr(fileh.root.consensus_group, attribute)
                node._f_remove()
            except AttributeError:
                pass

            values = np.array(getattr(hypergraph_adjacency, attribute))
            atom = tables.Atom.from_dtype(values.dtype)
            carray = fileh.create_carray(fileh.root.consensus_group, attribute,
                                         atom, values.shape, filters = FILTERS)
            carray[:] = values
def load_hypergraph_adjacency(hdf5_file_name):
    """Rebuild the hypergraph adjacency matrix from its stored CSR components.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store whose 'consensus_group' node holds the components.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, name).read()
                      for name in ('data', 'indices', 'indptr', 'shape')]

    data, indices, indptr, shape = components
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    # Create a fresh HDF5 scratch file with the node all helpers write into.
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense n_samples x n_samples matrix, which is impractical
    # beyond ~10000 samples; restrict to HGPA and MCLA in that regime.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run every applicable consensus function and score it against the
    # ensemble; the best-scoring labelling wins.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds, per sample, the cluster ID assigned by one clustering
        run; samples skipped by a run are tagged with NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus cluster assignment for every sample of the data-set.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed on the standard output.

    Returns
    -------
    unnamed variable : float
        Mutual information between the consensus clustering and the ensemble
        runs, averaged with weights equal to each run's number of labelled
        samples.
    """
    if cluster_ensemble is None:
        return 0.0

    # A flat label vector is promoted to a single-row matrix of partitions.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_score = 0
    total_labelled = 0
    for row in cluster_runs:
        # Only the samples actually labelled by this run take part in the score.
        known = np.where(np.isfinite(row))[0]
        n_known = known.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[known], verbose), newshape = n_known)
        run_part = np.reshape(checkcl(np.rint(row[known]), verbose), newshape = n_known)

        total_score += normalized_mutual_info_score(consensus_part, run_part) * n_known
        total_labelled += n_known

    return float(total_score) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, is not one-dimensional, or contains NaN.
    """
    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Bug fix: this message and the next used to misname the routine
        # as 'checkl'; both now correctly say 'checkcl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            # NOTE(review): np.asanyarray does not copy, so this in-place shift
            # also offsets the caller's array when an ndarray was passed in.
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        # Re-map to a dense 0..k-1 labelling if the current labels have gaps.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Each label is replaced by its rank among the sorted distinct labels, so
    the output ranges over the contiguous integers 0..(n_unique - 1).

    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    x = np.asanyarray(array_in)
    flat = x.reshape(x.size)

    # np.unique performs exactly the dense re-labelling the former hand-rolled
    # sort-and-scan implemented: 'inverse' holds, for each entry, the index of
    # its value within the sorted unique labels.
    _, inverse = np.unique(flat, return_inverse = True)

    return inverse.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Rejects empty, NaN-tainted or infinite matrices outright; otherwise
    truncates complex entries to their real part, crops a non-square matrix
    to its largest leading square sub-matrix, clips entries to [0, 1],
    symmetrizes, and forces a unit diagonal.

    NOTE(review): the function returns None. The clipping and unit-diagonal
    fixes mutate the array in place, but the reassignments for the real-part
    truncation, square cropping and symmetrization rebind only the local name
    'similarities' — those repairs do not propagate back to the caller's
    array. Confirm whether callers rely on any of these fixes.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Complex entries: keep only the real components (local rebinding).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Non-square input: keep the largest leading square block (local rebinding).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Similarities must lie in [0, 1]; clip offenders in place.
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Enforce symmetry by averaging with the transpose (local rebinding).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Self-similarities are unity by definition; fix the diagonal in place.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    # Bug fix: identity comparison with None ('== None' is non-idiomatic and
    # unreliable for array-like arguments).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    if N_samples > 20000:
        # The dense N_samples x N_samples co-association matrix below would
        # not fit in memory for larger data-sets.
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Co-association counts: entry (i, j) is the number of runs that put
    # samples i and j in the same cluster.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Validate the normalized co-association matrix as a similarity matrix.
    checks(np.divide(s, float(N_runs)), verbose)

    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        # Rescale on disk so that integral edge weights survive rounding.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round chunk by chunk to keep memory usage bounded.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Number of consensus clusters; defaults to one more than the highest
        cluster ID found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # Bug fix: identity comparison with None instead of '== None'.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Groups the hyper-edges (clusters from all runs) into meta-clusters via
    graph partitioning of their Jaccard similarity graph, collapses each
    meta-cluster into a mean membership vector, then assigns every sample to
    its most associated meta-cluster.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # Bug fix: identity comparison with None instead of '== None'.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge sizes, used as vertex weights for the partitioner.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between
    # the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # Pairwise intersection sizes of the binary hyper-edges.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Fill the Jaccard matrix chunk by chunk to bound memory usage:
        # J(a, b) = |a & b| / (|a| + |b| - |a & b|).
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # Partition the hyper-edges into meta-clusters.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges.

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Collapse each meta-cluster into the mean of its member hyper-edges.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Samples with no association at all receive random memberships so that
    # the normalization below is well defined.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Tiny random jitter breaks ties when competing for objects below.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums, computed chunk-wise, for normalizing each sample's
    # membership vector to a probability distribution.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Assign each sample to the meta-cluster holding its maximal membership.
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # Bug fix: removed a stray leading apostrophe from this log message.
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        Vector of cluster IDs; non-finite entries (e.g. NaN for samples
        skipped by the run) produce no row.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_clusters, n_samples), with a 1 wherever the sample belongs
    to the row's cluster.

    Raises
    ------
    ValueError
        If 'cluster_run' is not one-dimensional.
    """
    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    else:
        cluster_run = cluster_run.reshape(cluster_run.size)

        # Only finite labels define clusters; NaN-tagged samples are dropped.
        cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

        # Collect the member columns cluster by cluster and concatenate once:
        # repeated np.append in a loop was quadratic in the number of samples.
        per_cluster = [np.where(cluster_run == elt)[0] for elt in cluster_ids]

        indptr = np.zeros(1 + cluster_ids.size, dtype = np.int32)
        if per_cluster:
            indptr[1:] = np.cumsum([members.size for members in per_cluster])
            indices = np.concatenate(per_cluster)
        else:
            indices = np.empty(0, dtype = np.int32)

        data = np.ones(indices.size, dtype = int)

        return scipy.sparse.csr_matrix((data, indices, indptr),
                                       shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    import os

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Remove the temporary graph file via the standard library instead of
    # shelling out to 'rm'; the guard tolerates wgraph's 'DO_NOT_PROCESS'
    # sentinel, for which no file exists.
    if os.path.isfile(file_name):
        os.remove(file_name)
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional hyper-edge weights; selects the weighted wgraph format.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    import os

    if w is None:
        file_name = wgraph(hdf5_file_name, None, 2)
    else:
        file_name = wgraph(hdf5_file_name, w, 3)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Portable clean-up of the temporary graph file (was: subprocess 'rm').
    if os.path.isfile(file_name):
        os.remove(file_name)
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Vertex weights forwarded to the graph writer.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    import os

    file_name = wgraph(hdf5_file_name, w, 1)
    labels = sgraph(N_clusters_max, file_name)
    labels = one_to_max(labels)
    # Portable clean-up of the temporary graph file (was: subprocess 'rm').
    if os.path.isfile(file_name):
        os.remove(file_name)
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Method selects both the data source and the output format:
    0 -> CSPA similarity matrix, METIS graph ('wgraph_CSPA');
    1 -> MCLA Jaccard matrix with vertex weights, METIS graph ('wgraph_MCLA');
    2, 3 -> transposed hypergraph adjacency, hMETIS hypergraph ('wgraph_HGPA'),
    with hyper-edge weights taken from the data (2) or from 'w' (3).

    NOTE(review): for method 0 with an all-zero similarity matrix this
    function returns the sentinel 'DO_NOT_PROCESS' from inside the 'with'
    block, leaving 'fileh' open and an empty 'wgraph_CSPA' file on disk —
    confirm whether that leak matters to callers. Also, 'w_sum_before' is
    computed but never used, and 'w *= scale_factor' mutates the caller's
    weight array in place when an ndarray is passed.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)
        Vertex (method 1) or hyper-edge (method 3) weights.

    method : int, optional (default = 0)
        One of 0, 1, 2 or 3 as described above.

    Returns
    -------
    file_name : string
        Name of the graph file written, or 'DO_NOT_PROCESS' (method 0 only).
    """
    print('\n#')

    # Select the matrix to serialize and the output file name.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    # METIS graphs must not contain self-loops: zero the diagonal.
    if method in {0, 1}:
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0

    # Vertex weights are rescaled and rounded to integers for METIS.
    if method == 1:
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # Header line: vertex/edge counts plus the METIS/hMETIS format flag.
        if method == 0:
            # Each undirected edge is stored twice in the matrix, hence / 2.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count non-zero entries chunk-wise to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            # '11' flags both edge and vertex weights in the METIS format.
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One adjacency line per vertex: (weight,) edge, edge-weight pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        # Vertex weight comes first for the '11' format.
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # hMETIS format: one line per hyper-edge, weight followed by members.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    # Similarity matrices are one-shot inputs: drop them from the HDF5 store.
    if method in {0, 1}:
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int

    file_name : string
        One of 'wgraph_CSPA', 'wgraph_MCLA', 'wgraph_HGPA' (as produced by
        'wgraph'), or the 'DO_NOT_PROCESS' sentinel.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list for 'DO_NOT_PROCESS'.

    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph files.
    """
    import os

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        # NOTE(review): 'shell = True' with a command string; acceptable here
        # only because every component is an internally generated constant.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # np.loadtxt opens the file itself; the former redundant 'open' context
    # manager (and the dead 'np.empty(0)' initialisation) have been removed.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)

    labels = one_to_max(labels)

    # Portable clean-up of the partitioner's output file (was: subprocess 'rm').
    if os.path.isfile(out_name):
        os.remove(out_name)

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    consensus_labels : array of shape (n_samples,)
        Dense labels (0..k-1) from the consensus clustering.

    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list of int
        [0] followed by the number of clusters found in each run.

    mutual_info_list : list of float
        Per-run weighted mutual information with 'consensus_labels'.

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of shape (n_consensus_clusters, n_samples).
    """
    # Promote a flat label vector to a single-row matrix of partitions.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build the CSR structure of the consensus membership matrix:
    # row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # NOTE: this local name shadows the enclosing function's own name.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only samples with a finite, non-negative label in this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)

        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]

        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)

            # NOTE(review): 'indices' is cumulative across the clusters seen so
            # far, so each union for row c involves all members of clusters
            # 0..c rather than cluster c alone — confirm this is intended
            # before relying on the resulting overlap ratios.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size

            c += 1

        data = np.ones(indices.size, dtype = int)

        # Membership matrix of this run's clusters over all samples.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # Intersection sizes between this run's clusters and the consensus ones.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | CSPA | python | def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
print('*****')
print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
if N_samples > 20000:
raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
"deal with too large a number of cells.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
s = np.squeeze(np.asarray(s.todense()))
del hypergraph_adjacency
gc.collect()
checks(np.divide(s, float(N_runs)), verbose)
e_sum_before = s.sum()
sum_after = 100000000.0
scale_factor = sum_after / float(e_sum_before)
with tables.open_file(hdf5_file_name, 'r+') as fileh:
atom = tables.Float32Atom()
FILTERS = get_compression_filter(4 * (N_samples ** 2))
S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
(N_samples, N_samples), "Matrix of similarities arising "
"in Cluster-based Similarity Partitioning",
filters = FILTERS)
expr = tables.Expr("s * scale_factor")
expr.set_output(S)
expr.eval()
chunks_size = get_chunk_size(N_samples, 3)
for i in range(0, N_samples, chunks_size):
tmp = S[i:min(i+chunks_size, N_samples)]
S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
return metis(hdf5_file_name, N_clusters_max) | Cluster-based Similarity Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the CSPA heuristics for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L554-L623 | [
"def load_hypergraph_adjacency(hdf5_file_name):\n \"\"\"\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n Returns\n -------\n hypergraph_adjacency : compressed sparse row matrix\n \"\"\"\n\n with tables.open_file(hdf5_file_name, 'r+') as fileh:\n pars = []\n for par in ('data', 'indices', 'indptr', 'shape'):\n pars.append(getattr(fileh.root.consensus_group, par).read())\n\n hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])\n\n return hypergraph_adjacency\n",
"def get_chunk_size(N, n):\n \"\"\"Given a two-dimensional array with a dimension of size 'N', \n determine the number of rows or columns that can fit into memory.\n\n Parameters\n ----------\n N : int\n The size of one of the dimensions of a two-dimensional array. \n\n n : int\n The number of arrays of size 'N' times 'chunk_size' that can fit in memory.\n\n Returns\n -------\n chunk_size : int\n The size of the dimension orthogonal to the one of size 'N'. \n \"\"\"\n\n mem_free = memory()['free']\n if mem_free > 60000000:\n chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 40000000:\n chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 14000000:\n chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 8000000:\n chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 2000000:\n chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 1000000:\n chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))\n return chunk_size\n else:\n print(\"\\nERROR: Cluster_Ensembles: get_chunk_size: \"\n \"this machine does not have enough free memory resources \"\n \"to perform ensemble clustering.\\n\")\n sys.exit(1)\n",
"def get_compression_filter(byte_counts):\n \"\"\"Determine whether or not to use a compression on the array stored in\n a hierarchical data format, and which compression library to use to that purpose.\n Compression reduces the HDF5 file size and also helps improving I/O efficiency\n for large datasets.\n\n Parameters\n ----------\n byte_counts : int\n\n Returns\n -------\n FILTERS : instance of the tables.Filters class\n \"\"\"\n\n assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0\n\n if 2 * byte_counts > 1000 * memory()['free']:\n try:\n FILTERS = tables.filters(complevel = 5, complib = 'blosc', \n shuffle = True, least_significant_digit = 6)\n except tables.FiltersWarning:\n FILTERS = tables.filters(complevel = 5, complib = 'lzo', \n shuffle = True, least_significant_digit = 6) \n else:\n FILTERS = None\n\n return FILTERS\n",
"def checks(similarities, verbose = False):\n \"\"\"Check that a matrix is a proper similarity matrix and bring \n appropriate changes if applicable.\n\n Parameters\n ----------\n similarities : array of shape (n_samples, n_samples)\n A matrix of pairwise similarities between (sub)-samples of the data-set. \n\n verbose : Boolean, optional (default = False)\n Alerts of any issue with the similarities matrix provided\n and of any step possibly taken to remediate such problem.\n \"\"\"\n\n if similarities.size == 0:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checks: the similarities \"\n \"matrix provided as input happens to be empty.\\n\")\n elif np.where(np.isnan(similarities))[0].size != 0:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checks: input similarities \"\n \"matrix contains at least one 'NaN'.\\n\")\n elif np.where(np.isinf(similarities))[0].size != 0:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: checks: at least one infinite entry \"\n \"detected in input similarities matrix.\\n\")\n else:\n if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: complex entries found \"\n \"in the similarities matrix.\")\n\n similarities = similarities.real\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: \"\n \"truncated to their real components.\")\n\n if similarities.shape[0] != similarities.shape[1]:\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: non-square matrix provided.\")\n\n N_square = min(similarities.shape)\n similarities = similarities[:N_square, :N_square]\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.\")\n\n max_sim = np.amax(similarities)\n min_sim = np.amin(similarities)\n if max_sim > 1 or min_sim < 0:\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: strictly negative \"\n \"or bigger than unity entries spotted in input similarities matrix.\")\n\n indices_too_big = 
np.where(similarities > 1) \n indices_negative = np.where(similarities < 0)\n similarities[indices_too_big] = 1.0\n similarities[indices_negative] = 0.0\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: done setting them to \"\n \"the lower or upper accepted values.\") \n\n if not np.allclose(similarities, np.transpose(similarities)):\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: non-symmetric input \"\n \"similarities matrix.\")\n\n similarities = np.divide(similarities + np.transpose(similarities), 2.0)\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: now symmetrized.\")\n\n if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: the self-similarities \"\n \"provided as input are not all of unit value.\")\n\n similarities[np.diag_indices(similarities.shape[0])] = 1\n\n if verbose:\n print(\"\\nINFO: Cluster_Ensembles: checks: issue corrected.\")\n",
"def metis(hdf5_file_name, N_clusters_max):\n \"\"\"METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph \n passed by CSPA.\n\n Parameters\n ----------\n hdf5_file_name : string or file handle\n\n N_clusters_max : int\n\n Returns\n -------\n labels : array of shape (n_samples,)\n A vector of labels denoting the cluster to which each sample has been assigned\n as a result of the CSPA heuristics for consensus clustering.\n\n Reference\n ---------\n G. Karypis and V. Kumar, \"A Fast and High Quality Multilevel Scheme for\n Partitioning Irregular Graphs\"\n In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.\n \"\"\"\n\n file_name = wgraph(hdf5_file_name)\n labels = sgraph(N_clusters_max, file_name)\n subprocess.call(['rm', file_name])\n\n return labels\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Report the memory specifications of the current machine.

    Returns
    -------
    mem_info : dictionary
        Maps each field reported by 'psutil.virtual_memory()' (such as
        'total', 'free' and 'used') to its value, coerced to an integer.
    """

    return {field: int(amount)
            for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    # Tiers of (minimum free memory in KB, amount of memory to hold back),
    # ordered from the most to the least generous configuration; the first
    # tier exceeded by the amount of free memory determines the chunk size.
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )

    mem_free = memory()['free']
    for threshold, reserve in tiers:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Projected size in bytes of the array to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in memory and compression
        is deemed unnecessary.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # NB: 'tables.Filters' (capital F) is the filter-properties class;
            # the lowercase 'tables.filters' is the module it lives in and is
            # not callable, so the original call raised a TypeError here.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on LZO if the Blosc compression library is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    # One binary membership-indicator matrix per partition; stacking them
    # vertically makes every cluster of every partition one hyper-edge,
    # i.e. one row of the adjacency matrix.
    membership_blocks = [create_membership_matrix(cluster_runs[run])
                         for run in range(cluster_runs.shape[0])]

    return scipy.sparse.vstack(membership_blocks, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.

    The three CSR components ('data', 'indices', 'indptr') and the matrix
    shape are stored as separate, possibly compressed, arrays under the node
    '/consensus_group' of the HDF5 file, so that the sparse matrix can later
    be rebuilt by 'load_hypergraph_adjacency'.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Choose a compression filter based on the combined footprint of the
    # three CSR component arrays.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale array left over from a previous run before
            # re-creating it; 'getattr' raises AttributeError when the node
            # does not exist yet, in which case there is nothing to remove.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)

            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reconstruct from disk the hypergraph adjacency matrix previously
    saved by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    component_names = ('data', 'indices', 'indptr', 'shape')

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        components = [getattr(group, name).read() for name in component_names]

    data, indices, indptr, shape = components
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'

    # Create (or truncate) the HDF5 scratch file shared by all the
    # consensus functions called below.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense (n_samples x n_samples) similarity matrix and is
    # therefore skipped for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run each candidate consensus function, score it by mutual information
    # against the ensemble, and keep the best-scoring labelling.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # A one-dimensional label vector is promoted to a single-row matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    accumulated_score = 0
    accumulated_count = 0

    for run in cluster_runs:
        # Only the samples actually labelled in this run (non-NaN entries)
        # take part in the mutual-information computation.
        labelled = np.where(np.isfinite(run))[0]
        count = labelled.size

        consensus_labels = np.reshape(checkcl(cluster_ensemble[labelled], verbose), newshape = count)
        run_labels = np.reshape(checkcl(np.rint(run[labelled]), verbose), newshape = count)

        # Weight each run's score by the number of its labelled samples.
        accumulated_score += normalized_mutual_info_score(consensus_labels, run_labels) * count
        accumulated_count += count

    return float(accumulated_score) / accumulated_count
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    """

    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Reject any input that is not effectively one-dimensional.
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")

            # NB: in-place subtraction, so the caller's array (when not
            # copied by 'asanyarray') is shifted as well.
            cluster_run -= min_label

            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        # Re-map the labels to a dense 0..K range when gaps are present.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")

            cluster_run = x

            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """

    flat = np.asanyarray(array_in).reshape(-1)
    size = flat.size

    # Walk the values in ascending order, handing out a fresh dense ID each
    # time a new value is encountered; 'dense' is a sorted copy that gets
    # overwritten in place with those IDs.
    order = np.argsort(flat)
    dense = np.sort(flat)

    previous = np.nan
    next_id = -1
    for pos in range(size):
        # NaN never compares equal to itself, hence the explicit isnan test.
        if previous != dense[pos] or np.isnan(previous):
            previous = dense[pos]
            next_id += 1
        dense[pos] = next_id

    # Scatter the dense IDs back to the original positions.
    result = np.empty(size, dtype = int)
    result[order] = dense

    return result
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Note that some corrections below (clipping of out-of-range entries)
    operate in place on the array provided at input, whereas others
    (truncation to real components, symmetrization) only rebind the local
    variable and are therefore not visible to the caller.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Keep only the real components of any complex entries.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")

            similarities = similarities.real

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # A non-square input is reduced to its largest top-left square block.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")

            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries to the valid similarity range [0, 1] (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")

            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose when needed.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")

            similarities = np.divide(similarities + np.transpose(similarities), 2.0)

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force unit self-similarity along the diagonal (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")

            similarities[np.diag_indices(similarities.shape[0])] = 1

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    # 'is None' rather than '== None': identity is the correct idiom and
    # avoids element-wise comparison should an array ever be passed.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA requires a dense (N_samples x N_samples) similarity matrix.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Co-association counts: entry (i, j) is the number of hyper-edges
    # (clusters) that samples i and j share.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Sanity-check the (normalized) similarity matrix before partitioning.
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so that the similarities sum to a fixed budget, as expected
    # by the graph-partitioning step.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # METIS expects integer edge weights: round in memory-bounded chunks.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # 'is None' rather than '== None': identity is the correct idiom and
    # avoids element-wise comparison should an array ever be passed.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # 'is None' rather than '== None': identity is the correct idiom and
    # avoids element-wise comparison should an array ever be passed.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Hyper-edge sizes, later used as vertex weights for the partitioning.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Jaccard(i, j) = |i ∩ j| / (|i| + |j| - |i ∩ j|), computed in
        # memory-bounded chunks of rows.
        chunks_size = get_chunk_size(N_rows, 7)

        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Collapse each meta-cluster's hyper-edges into one mean-membership row.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    # Samples associated with no meta-cluster at all get random memberships
    # so that the competition below remains well-defined.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder != 0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    # Tiny random jitter breaks ties between equally-associated meta-clusters.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    # Normalize each column so its entries behave as posterior probabilities.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # Fixed a stray leading apostrophe that was inside this message string.
        print("INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """Construct the binary membership indicator matrix of a single clustering.

    Row i of the result flags (with ones) the samples assigned to the i-th
    distinct finite cluster ID of 'cluster_run'. Such matrices, when stacked,
    form the adjacency matrix of the hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)
        Must be effectively one-dimensional (a vector, possibly with
        singleton dimensions).

    Returns
    -------
    An adjacency matrix in compressed sparse row form.
    """
    cluster_run = np.asanyarray(cluster_run)
    # Reject anything that is not effectively a vector.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Only finite labels define rows of the membership matrix.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    member_indices = np.empty(0, dtype = np.int32)
    row_boundaries = np.zeros(1, dtype = np.int32)
    for cluster_id in cluster_ids:
        member_indices = np.append(member_indices, np.where(cluster_run == cluster_id)[0])
        row_boundaries = np.append(row_boundaries, member_indices.size)
    ones = np.ones(member_indices.size, dtype = int)
    return scipy.sparse.csr_matrix((ones, member_indices, row_boundaries),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """Partition the similarity graph induced by CSPA with the METIS
    algorithm of Karypis and Kumar.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Serialize the similarity graph, partition it, then discard the scratch file.
    graph_file = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, graph_file)
    subprocess.call(['rm', graph_file])
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Return cluster labels ranging from 1 to N_clusters_max for the
    hypergraph partitioning required by HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Hyper-edge weights; when omitted, weights are derived from the
        hypergraph itself.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 lets 'wgraph' compute hyper-edge weights; code 3 uses 'w'.
    graph_file = wgraph(hdf5_file_name, None, 2) if w is None else wgraph(hdf5_file_name, w, 3)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Return cluster labellings ranging from 1 to N_clusters_max for the
    meta-graph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Vertex weights forwarded to 'wgraph'.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 selects the MCLA meta-cluster similarity graph.
    graph_file = wgraph(hdf5_file_name, w, 1)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).
    method : int, optional (default = 0)
        0 -- similarity graph for CSPA; 1 -- meta-cluster graph for MCLA;
        2 or 3 -- hypergraph for HGPA (3 uses the hyper-edge weights 'w').
    Returns
    -------
    file_name : string
        Name of the graph file written in the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA similarity matrix has no
        positive off-diagonal entry.
    """
    print('\n#')
    # Select the matrix to serialize and the output file name for each method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        # Transpose so that rows index samples and columns index hyper-edges.
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # Self-similarities must not contribute edges to the graph.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS expects integer weights: rescale then round.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        if method == 0:
            # Header line: vertex count, undirected edge count, format flag.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): returning here leaves 'fileh' open - the
                # fileh.close() at the bottom of this function is skipped.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count positive entries chunk-wise to bound peak memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            # '11' flags both vertex and edge weights in the METIS format.
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # Emit one adjacency line per vertex: (neighbour, weight) pairs,
            # optionally prefixed by the vertex weight (method 1).
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # Hypergraph output: one line per hyper-edge, weight first.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # Free the disk space used by the similarity matrix once serialized.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.
    Parameters
    ----------
    N_clusters_max : int
    file_name : string
        One of 'wgraph_CSPA', 'wgraph_MCLA' or 'wgraph_HGPA', as produced
        by 'wgraph'; the sentinel 'DO_NOT_PROCESS' short-circuits to [].
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    """
    if file_name == 'DO_NOT_PROCESS':
        # Sentinel emitted by 'wgraph' when the CSPA graph has no edge.
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The external partitioners write their result to '<graph file>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binary is platform-specific.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                          'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                          'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is presumably the shmetis imbalance (UBfactor)
        # parameter -- TODO confirm against the hMETIS manual.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    # NOTE(review): the assignment below is immediately overwritten by
    # np.loadtxt inside the 'with' block; it only matters if loading fails.
    labels = np.empty(0, dtype = int)
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Compact the partitioner's labels into a dense 0..k-1 mapping.
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)
    Returns
    -------
    cluster_dims_list : list
        For each run, the number of distinct finite non-negative cluster
        IDs; starts with a leading 0 entry.
    mutual_info_list : list
        Mutual information between each run and the consensus labels,
        as computed by 'ceEvalMutual'.
    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of the consensus clustering.
    """
    # Promote a flat label vector to a single-run matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR membership matrix of the consensus clustering:
    # row k holds the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Extendable array: one block of rows appended per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        # NOTE(review): 'indices' accumulates the members of all clusters
        # processed so far in this run, so the union computed for row c
        # also includes the members of clusters 0..c-1 -- verify that this
        # is the intended denominator for the overlap ratio.
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Intersection sizes between this run's clusters and the consensus clusters.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | HGPA | python | def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
print('\n*****')
print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
return hmetis(hdf5_file_name, N_clusters_max) | HyperGraph-Partitioning Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : string or file handle
cluster_runs: array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the HGPA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L626-L657 | [
"def hmetis(hdf5_file_name, N_clusters_max, w = None):\n \"\"\"Gives cluster labels ranging from 1 to N_clusters_max for \n hypergraph partitioning required for HGPA.\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n N_clusters_max : int\n\n w : array, optional (default = None)\n\n Returns\n -------\n labels : array of shape (n_samples,)\n A vector of labels denoting the cluster to which each sample has been assigned\n as a result of the HGPA approximation algorithm for consensus clustering.\n\n Reference\n ---------\n G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, \"Multilevel hypergraph\n partitioning: applications in VLSI domain\" \n In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, \n Vol. 7, No. 1, pp. 69-79, 1999.\n \"\"\"\n\n if w is None:\n file_name = wgraph(hdf5_file_name, None, 2)\n else:\n file_name = wgraph(hdf5_file_name, w, 3)\n labels = sgraph(N_clusters_max, file_name)\n labels = one_to_max(labels)\n\n subprocess.call(['rm', file_name])\n\n return labels\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.
    Returns
    -------
    mem_info : dictionary
        Maps each field reported by psutil.virtual_memory() (e.g. 'total',
        'free', 'used') to its value cast to an integer.
    """
    return {field: int(amount) for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.
    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    # Pairs of (free-memory floor, reserved safety margin), in the units
    # returned by memory()['free']; the margin shrinks with less free memory.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))
    mem_free = memory()['free']
    for floor, margin in tiers:
        if mem_free > floor:
            return int(((mem_free - margin) * 1000) / (4 * n * N))
    # Below the lowest tier there is not enough memory to proceed at all.
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.
    Parameters
    ----------
    byte_counts : int
        Number of bytes of the array about to be stored; must be positive.
    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to the machine's free memory,
        in which case compression is not worthwhile.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # Compress only when the array is large compared to free memory.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fixed: 'tables.filters' is a module and is not callable;
            # the filter-properties class is 'tables.Filters'.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on the LZO compressor if Blosc is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    # Stack all membership matrices in a single vstack call instead of
    # growing the sparse matrix one partition at a time, which re-copied
    # the accumulated matrix at every iteration (quadratic in N_runs).
    membership_matrices = [create_membership_matrix(cluster_runs[i])
                           for i in range(cluster_runs.shape[0])]
    return scipy.sparse.vstack(membership_matrices, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.
    The four CSR components ('data', 'indices', 'indptr', 'shape') are stored
    as separate compressed arrays under the 'consensus_group' node, replacing
    any previously stored components of the same names.
    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    # The total size of the CSR representation drives the compression choice.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove any stale array left over from a previous call.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Rebuild the hypergraph adjacency matrix previously saved to disk
    by 'store_hypergraph_adjacency'.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    # NOTE(review): the file is opened 'r+' although it is only read here.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        pars = []
        # Read back the four CSR components in the order they were stored.
        for par in ('data', 'indices', 'indptr', 'shape'):
            pars.append(getattr(fileh.root.consensus_group, par).read())
    # Reassemble the matrix from (data, indices, indptr) plus its shape.
    hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])
    return hypergraph_adjacency
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.
    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.
    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").
    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Create (or overwrite) the HDF5 scratch file holding all large arrays.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA builds a dense n_samples x n_samples similarity matrix, which is
    # prohibitive for large data-sets; skip it beyond 10000 samples.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # Persist the hypergraph adjacency once; each consensus function reads it.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by mutual information with the runs.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the consensus clustering with the highest score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """
    if cluster_ensemble is None:
        return 0.0
    # Promote a flat label vector to a single-run matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    weighted_average_mutual_information = 0
    N_labelled_indices = 0
    for i in range(cluster_runs.shape[0]):
        # Restrict the comparison to the samples actually labelled in run i.
        labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]
        N = labelled_indices.size
        # Normalize both label vectors to dense non-negative integers.
        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)
        q = normalized_mutual_info_score(x, y)
        # Weight each run's score by its number of labelled samples.
        weighted_average_mutual_information += q * N
        N_labelled_indices += N
    return float(weighted_average_mutual_information) / N_labelled_indices
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    Raises
    ------
    ValueError
        If the vector is empty, not effectively one-dimensional,
        or contains any NaN entry.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # Fixed: the two error messages below previously misspelled the
        # function name as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift any negative labellings up to a minimum of zero.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Compact the labels into a dense integer mapping if needed.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.
    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.
    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        equal labels map to equal integers, and the integers form the
        dense range 0..k-1 in increasing label order.
    """
    flattened = np.asanyarray(array_in).reshape(-1)
    size = flattened.size
    # Walk the labels in sorted order, handing out a fresh dense ID each
    # time the value changes; write each ID back at its original position.
    order = np.argsort(flattened)
    ranks = np.empty(size, dtype = int)
    previous = np.nan
    next_id = -1
    for pos in range(size):
        value = flattened[order[pos]]
        if previous != value or np.isnan(previous):
            previous = value
            next_id += 1
        ranks[order[pos]] = next_id
    return ranks
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.
    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.
    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.
    """
    # NOTE(review): several "corrections" below rebind the local name
    # 'similarities' (taking the real part, truncating to a square
    # sub-matrix, symmetrizing) and therefore never reach the caller;
    # only the in-place index assignments (clipping to [0, 1], fixing
    # the diagonal) mutate the caller's array. The function returns None.
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Drop imaginary components, if any (local rebind only).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Truncate to the largest leading square sub-matrix (local rebind only).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        # Clip entries to the valid similarity range [0, 1] (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (local rebind only).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file whose 'consensus_group' node holds the hypergraph adjacency
        matrix; also used as scratch storage for the similarity matrix.
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the same n_samples objects.
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; defaults to the
        largest label present in 'cluster_runs' plus one.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    # 'is None' instead of '== None': identity comparison is the correct idiom
    # for testing the default sentinel.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # The CSPA co-association matrix is dense and N_samples x N_samples; refuse
    # problem sizes for which that dense matrix would be impractical.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Co-association counts: entry (i, j) counts the runs in which samples
    # i and j were assigned to the same cluster.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate the similarities once normalized to [0, 1] by the number of runs.
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the summed edge weight equals a fixed large constant; METIS
    # consumes integer edge weights, hence the rounding below.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round to integers chunk by chunk to bound peak memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 file whose 'consensus_group' node holds the hypergraph adjacency
        matrix.
    cluster_runs: array of shape (n_partitions, n_samples)
        Each row is one clustering of the same n_samples objects.
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; defaults to the
        largest label present in 'cluster_runs' plus one.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # 'is None' instead of '== None': identity comparison is the correct idiom
    # for testing the default sentinel.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    # All the heavy lifting (writing the hypergraph file and invoking shmetis)
    # happens in the 'hmetis' wrapper.
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.
    Clusters the hyper-edges of the ensemble's hypergraph into meta-clusters,
    collapses each meta-cluster into a mean membership vector, then assigns
    every sample to its most associated meta-cluster.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file whose 'consensus_group' node holds the hypergraph adjacency
        matrix; also used as scratch storage for intermediate matrices.
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the same n_samples objects.
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; defaults to the
        largest label present in 'cluster_runs' plus one.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # w: size of each hyper-edge (number of samples per cluster across all runs),
    # later used as vertex weights for the meta-graph partitioning.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)
        # Jaccard scores are scaled to integers in [0, 100] since the graph
        # partitioner downstream works with integer edge weights.
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # |A & B| for every pair of hyper-edges via one sparse matrix product.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            # Jaccard(A, B) = |A & B| / (|A| + |B| - |A & B|), computed chunk
            # by chunk to bound peak memory usage.
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    # Partition the meta-graph of hyper-edges with METIS, using the hyper-edge
    # sizes 'w' as vertex weights.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    # clb_cum[c, s]: mean membership of sample s in the hyper-edges that make
    # up meta-cluster c.
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Locate samples with all-zero association to every meta-cluster.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Give orphan samples random associations so they can still be assigned.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    # Add tiny random jitter to 'clb_cum' to break ties in the argmax below.
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    # Column sums of the jitter matrix, used to normalize 'clb_cum' so that the
    # entries behave like posterior probabilities.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize each column and record the per-sample maximum association.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    # Assign each sample to the meta-cluster achieving its maximum association;
    # iterating in reverse means the lowest matching index wins on exact ties.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    # Only dump the full posteriors for toy-sized problems.
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # NOTE(review): the string below starts with a stray leading quote
        # character — confirm whether that is intended.
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster labels; (1, n_samples) or (n_samples, 1) arrays
        are accepted as well. Non-finite entries (NaN, inf) are ignored when
        collecting the distinct cluster IDs.
    Returns
    -------
    An adjacency matrix of shape (n_clusters, n_samples) in compressed sparse
    row form, whose (k, s) entry is 1 if sample s belongs to the k-th
    (sorted) cluster ID and 0 otherwise.
    Raises
    ------
    ValueError
        If 'cluster_run' is not a vector.
    """
    cluster_run = np.asanyarray(cluster_run)
    # A vector has size equal to its largest dimension; this replaces the
    # previous reduce(operator.mul, ...) product over the shape.
    if cluster_run.size != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Distinct finite cluster IDs, in sorted order (defines the row order).
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Build the CSR structure directly: row k holds the sample indices of the
    # k-th cluster ID.
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for elt in cluster_ids:
        indices = np.append(indices, np.where(cluster_run == elt)[0])
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.
    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 file holding the CSPA similarity matrix under 'consensus_group'.
    N_clusters_max : int
        Number of parts requested from the graph partitioner.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # 'wgraph' returns the sentinel 'DO_NOT_PROCESS' when the similarity graph
    # has no positive edge; in that case no graph file was written and there is
    # nothing to delete (previously 'rm' was invoked on the sentinel and failed).
    if file_name != 'DO_NOT_PROCESS':
        subprocess.call(['rm', file_name])
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Wrapper around the hMETIS hypergraph partitioner used by HGPA.
    Writes the hypergraph to disk, partitions it and returns labels
    remapped to a dense range from 1 to N_clusters_max.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the hypergraph adjacency matrix.
    N_clusters_max : int
        Number of parts requested from the hypergraph partitioner.
    w : array, optional (default = None)
        Optional vector of hyper-edge weights.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 writes an unweighted hypergraph file, code 3 a weighted one.
    if w is None:
        graph_file = wgraph(hdf5_file_name, None, 2)
    else:
        graph_file = wgraph(hdf5_file_name, w, 3)
    partition = sgraph(N_clusters_max, graph_file)
    partition = one_to_max(partition)
    # Clean up the temporary graph file written by 'wgraph'.
    subprocess.call(['rm', graph_file])
    return partition
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Wrapper around the METIS graph partitioner used by MCLA.
    Writes the meta-graph to disk, partitions it and returns labels
    remapped to a dense range from 1 to N_clusters_max.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the matrix of pairwise Jaccard similarities.
    N_clusters_max : int
        Number of parts requested from the graph partitioner.
    w : array, optional (default = None)
        Optional vector of vertex weights for the meta-graph.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 writes a vertex- and edge-weighted graph file for MCLA.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition = sgraph(N_clusters_max, graph_file)
    partition = one_to_max(partition)
    # Clean up the temporary graph file written by 'wgraph'.
    subprocess.call(['rm', graph_file])
    return partition
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding, under 'consensus_group', the similarity or
        hypergraph adjacency matrix to serialize.
    w : list or array, optional (default = None)
        Vertex weights; used only for methods 1 and 3.
    method : int, optional (default = 0)
        0: CSPA similarity graph (edge weights only);
        1: MCLA meta-graph (vertex and edge weights);
        2: HGPA unweighted hypergraph;
        3: HGPA hypergraph with hyper-edge weights 'w'.
    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory, or the
        sentinel 'DO_NOT_PROCESS' when a CSPA graph has no positive edge.
    """
    print('\n#')
    # Select the source matrix and the output file name for each method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        # Transposed copy: rows become samples, columns become hyper-edges.
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # METIS ignores self-loops; zero out the diagonal before counting edges.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # Scale the vertex weights to integers, as required by METIS.
        # NOTE(review): 'w_sum_before' is computed but never used — dead code.
        # NOTE(review): 'w *= scale_factor' mutates the caller's array when 'w'
        # is a NumPy array — confirm callers do not rely on 'w' afterwards.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line: METIS expects "<#vertices> <#edges> <fmt>", where fmt
        # '1' means edge weights and '11' means vertex and edge weights;
        # hMETIS expects "<#hyper-edges> <#vertices> <fmt>".
        if method == 0:
            # Each undirected edge is stored twice in the symmetric matrix.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # No edge at all: signal the caller to skip partitioning.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count positive entries chunk by chunk to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: (optional vertex weight,) then alternating
            # neighbour-index / edge-weight pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # hMETIS format: one line per hyper-edge, starting with its weight
            # followed by the 1-based indices of its member vertices.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The dense similarity matrix is no longer needed once serialized.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.
    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the (hyper-)graph partitioner.
    file_name : string
        Name of the graph file written by 'wgraph'; also selects which
        external partitioning program is invoked.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).
    Raises
    ------
    NameError
        If 'file_name' is not one of the names emitted by 'wgraph'.
    """
    # Sentinel emitted by 'wgraph' when the graph had no edge to partition.
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # METIS and hMETIS write their partition vector to '<input>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries only exist for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # NOTE(review): command built for shell = True; all components are
        # internally generated, so no untrusted input reaches the shell here.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name in ('wgraph_CSPA', 'wgraph_MCLA'):
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    # Load the partition vector; 'np.loadtxt' opens 'out_name' itself and
    # raises if the partitioner failed to produce it (the previous version's
    # placeholder 'labels = np.empty(0)' and unused 'with open(...)' wrapper
    # were dead code and have been removed).
    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.
    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file receiving the 'overlap_matrix' array under 'consensus_group'.
    consensus_labels : array of shape (n_samples,)
        Labels obtained from consensus clustering.
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the same n_samples objects.
    Returns
    -------
    cluster_dims_list : list
        Starts at 0 and appends, per run, the number of distinct valid
        cluster IDs found in that run.
    mutual_info_list : list
        Per-run mutual information between the run and 'consensus_labels'.
    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of shape (n_consensus_labels, n_samples).
    """
    # Accept a single labelling passed as a flat vector.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the consensus membership matrix in CSR form: row k lists the
    # sample indices assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Extendable array: one block of rows appended per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        # For every (run-cluster, consensus-cluster) pair, compute the size of
        # the union of their sample sets; intersections come from a sparse
        # product below, yielding Jaccard-style overlap ratios.
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        # Membership matrix of this run, rows indexed by its cluster IDs.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | MCLA | python | def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
print('\n*****')
print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
if N_clusters_max == None:
N_clusters_max = int(np.nanmax(cluster_runs)) + 1
N_runs = cluster_runs.shape[0]
N_samples = cluster_runs.shape[1]
print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
w = hypergraph_adjacency.sum(axis = 1)
N_rows = hypergraph_adjacency.shape[0]
print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
"Starting computation of Jaccard similarity matrix.")
# Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
with tables.open_file(hdf5_file_name, 'r+') as fileh:
FILTERS = get_compression_filter(4 * (N_rows ** 2))
similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
'similarities_MCLA', tables.Float32Atom(),
(N_rows, N_rows), "Matrix of pairwise Jaccard "
"similarity scores", filters = FILTERS)
scale_factor = 100.0
print("INFO: Cluster_Ensembles: MCLA: "
"starting computation of Jaccard similarity matrix.")
squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
squared_sums = hypergraph_adjacency.sum(axis = 1)
squared_sums = np.squeeze(np.asarray(squared_sums))
chunks_size = get_chunk_size(N_rows, 7)
for i in range(0, N_rows, chunks_size):
n_dim = min(chunks_size, N_rows - i)
temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
temp = np.squeeze(np.asarray(temp))
x = squared_sums[i:min(i+chunks_size, N_rows)]
x = x.reshape(-1, 1)
x = np.dot(x, np.ones((1, squared_sums.size)))
y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
temp = np.divide(temp, x + y - temp)
temp *= scale_factor
Jaccard_matrix = np.rint(temp)
similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
del Jaccard_matrix, temp, x, y
gc.collect()
# Done computing the matrix of pairwise Jaccard similarity scores.
print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
"pairwise Jaccard similarity scores.")
cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
cluster_labels = one_to_max(cluster_labels)
# After 'cmetis' returns, we are done with clustering hyper-edges
# We are now ready to start the procedure meant to collapse meta-clusters.
N_consensus = np.amax(cluster_labels) + 1
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus * N_samples)
clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
tables.Float32Atom(), (N_consensus, N_samples),
'Matrix of mean memberships, forming meta-clusters',
filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 7)
for i in range(0, N_consensus, chunks_size):
x = min(chunks_size, N_consensus - i)
matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
M = np.zeros((x, N_samples))
for j in range(x):
coord = np.where(matched_clusters[0] == j)[0]
M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
clb_cum[i:min(i+chunks_size, N_consensus)] = M
# Done with collapsing the hyper-edges into a single meta-hyper-edge,
# for each of the (N_consensus - 1) meta-clusters.
del hypergraph_adjacency
gc.collect()
# Each object will now be assigned to its most associated meta-cluster.
chunks_size = get_chunk_size(N_consensus, 4)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
else:
szumsz = np.zeros(0)
for i in range(N_chunks):
M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
szumsz = np.append(szumsz, M.sum(axis = 0))
if remainder != 0:
M = clb_cum[:, N_chunks*chunks_size:N_samples]
szumsz = np.append(szumsz, M.sum(axis = 0))
null_columns = np.where(szumsz == 0)[0]
if null_columns.size != 0:
print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
"in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
random_state = np.random.RandomState()
tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
(N_consensus, N_samples), "Temporary matrix to help with "
"collapsing to meta-hyper-edges", filters = FILTERS)
chunks_size = get_chunk_size(N_samples, 2)
N_chunks, remainder = divmod(N_consensus, chunks_size)
if N_chunks == 0:
tmp[:] = random_state.rand(N_consensus, N_samples)
else:
for i in range(N_chunks):
tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
if remainder !=0:
tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
expr = tables.Expr("clb_cum + (tmp / 10000)")
expr.set_output(clb_cum)
expr.eval()
expr = tables.Expr("abs(tmp)")
expr.set_output(tmp)
expr.eval()
chunks_size = get_chunk_size(N_consensus, 2)
N_chunks, remainder = divmod(N_samples, chunks_size)
if N_chunks == 0:
sum_diag = tmp[:].sum(axis = 0)
else:
sum_diag = np.empty(0)
for i in range(N_chunks):
M = tmp[:, i*chunks_size:(i+1)*chunks_size]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
if remainder != 0:
M = tmp[:, N_chunks*chunks_size:N_samples]
sum_diag = np.append(sum_diag, M.sum(axis = 0))
fileh.remove_node(fileh.root.consensus_group, "tmp")
# The corresponding disk space will be freed after a call to 'fileh.close()'.
inv_sum_diag = np.reciprocal(sum_diag.astype(float))
if N_chunks == 0:
clb_cum *= inv_sum_diag
max_entries = np.amax(clb_cum, axis = 0)
else:
max_entries = np.zeros(N_samples)
for i in range(N_chunks):
clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
if remainder != 0:
clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
cluster_labels = np.zeros(N_samples, dtype = int)
winner_probabilities = np.zeros(N_samples)
chunks_size = get_chunk_size(N_samples, 2)
for i in reversed(range(0, N_consensus, chunks_size)):
ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
cluster_labels[ind[1]] = i + ind[0]
winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
# Done with competing for objects.
cluster_labels = one_to_max(cluster_labels)
print("INFO: Cluster_Ensembles: MCLA: delivering "
"{} clusters.".format(np.unique(cluster_labels).size))
print("INFO: Cluster_Ensembles: MCLA: average posterior "
"probability is {}".format(np.mean(winner_probabilities)))
if cluster_labels.size <= 7:
print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
print(winner_probabilities)
print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
print(clb_cum)
fileh.remove_node(fileh.root.consensus_group, "clb_cum")
fileh.close()
return cluster_labels | Meta-CLustering Algorithm for a consensus function.
Parameters
----------
hdf5_file_name : file handle or string
cluster_runs : array of shape (n_partitions, n_samples)
verbose : bool, optional (default = False)
N_clusters_max : int, optional (default = None)
Returns
-------
A vector specifying the cluster label to which each sample has been assigned
by the MCLA approximation algorithm for consensus clustering.
Reference
---------
A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L660-L881 | [
"def load_hypergraph_adjacency(hdf5_file_name):\n \"\"\"\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n Returns\n -------\n hypergraph_adjacency : compressed sparse row matrix\n \"\"\"\n\n with tables.open_file(hdf5_file_name, 'r+') as fileh:\n pars = []\n for par in ('data', 'indices', 'indptr', 'shape'):\n pars.append(getattr(fileh.root.consensus_group, par).read())\n\n hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])\n\n return hypergraph_adjacency\n",
"def get_chunk_size(N, n):\n \"\"\"Given a two-dimensional array with a dimension of size 'N', \n determine the number of rows or columns that can fit into memory.\n\n Parameters\n ----------\n N : int\n The size of one of the dimensions of a two-dimensional array. \n\n n : int\n The number of arrays of size 'N' times 'chunk_size' that can fit in memory.\n\n Returns\n -------\n chunk_size : int\n The size of the dimension orthogonal to the one of size 'N'. \n \"\"\"\n\n mem_free = memory()['free']\n if mem_free > 60000000:\n chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 40000000:\n chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 14000000:\n chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 8000000:\n chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 2000000:\n chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 1000000:\n chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))\n return chunk_size\n else:\n print(\"\\nERROR: Cluster_Ensembles: get_chunk_size: \"\n \"this machine does not have enough free memory resources \"\n \"to perform ensemble clustering.\\n\")\n sys.exit(1)\n",
"def get_compression_filter(byte_counts):\n \"\"\"Determine whether or not to use a compression on the array stored in\n a hierarchical data format, and which compression library to use to that purpose.\n Compression reduces the HDF5 file size and also helps improving I/O efficiency\n for large datasets.\n\n Parameters\n ----------\n byte_counts : int\n\n Returns\n -------\n FILTERS : instance of the tables.Filters class\n \"\"\"\n\n assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0\n\n if 2 * byte_counts > 1000 * memory()['free']:\n try:\n FILTERS = tables.filters(complevel = 5, complib = 'blosc', \n shuffle = True, least_significant_digit = 6)\n except tables.FiltersWarning:\n FILTERS = tables.filters(complevel = 5, complib = 'lzo', \n shuffle = True, least_significant_digit = 6) \n else:\n FILTERS = None\n\n return FILTERS\n",
"def one_to_max(array_in):\n \"\"\"Alter a vector of cluster labels to a dense mapping. \n Given that this function is herein always called after passing \n a vector to the function checkcl, one_to_max relies on the assumption \n that cluster_run does not contain any NaN entries.\n\n Parameters\n ----------\n array_in : a list or one-dimensional array\n The list of cluster IDs to be processed.\n\n Returns\n -------\n result : one-dimensional array\n A massaged version of the input vector of cluster identities.\n \"\"\"\n\n x = np.asanyarray(array_in)\n N_in = x.size\n array_in = x.reshape(N_in) \n\n sorted_array = np.sort(array_in)\n sorting_indices = np.argsort(array_in)\n\n last = np.nan\n current_index = -1\n for i in range(N_in):\n if last != sorted_array[i] or np.isnan(last):\n last = sorted_array[i]\n current_index += 1\n\n sorted_array[i] = current_index\n\n result = np.empty(N_in, dtype = int)\n result[sorting_indices] = sorted_array\n\n return result\n",
"def cmetis(hdf5_file_name, N_clusters_max, w = None):\n \"\"\"Returns cluster labellings ranging from 1 to N_clusters_max \n for hypergraph partitioning involved in MCLA.\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n N_clusters_max : int\n\n w : array, optiona (default = None)\n\n Returns\n -------\n labels : array of shape (n_samples,)\n A vector of labels denoting the cluster to which each sample has been assigned\n as a result of the MCLA approximation algorithm for consensus clustering.\n\n Reference\n ---------\n G. Karypis and V. Kumar, \"A Fast and High Quality Multilevel Scheme for\n Partitioning Irregular Graphs\"\n In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.\n \"\"\"\n\n file_name = wgraph(hdf5_file_name, w, 1)\n labels = sgraph(N_clusters_max, file_name)\n labels = one_to_max(labels)\n\n subprocess.call(['rm', file_name])\n\n return labels\n"
#!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
# Standard library
import functools
import gc
import numbers
import operator
import os
import subprocess
import sys
import warnings
from functools import reduce

# Third-party
import numpy as np
import pkg_resources
import psutil
import scipy.sparse
import six
import tables
from six.moves import range
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
# Silence floating-point 'invalid value' warnings (e.g. 0/0 divisions that can
# arise in the similarity computations below) and deprecation noise.
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)

# Public API of this module.
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
           'MCLA', 'overlap_matrix']
def memory():
    """Report the machine's current memory statistics.

    Returns
    -------
    mem_info : dict
        Maps each field reported by ``psutil.virtual_memory()``
        (e.g. 'total', 'free', 'used') to its value cast to an integer.
    """

    return {field: int(amount)
            for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit
        in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    # (free-memory threshold, amount held back) pairs; the first threshold
    # exceeded by the available memory determines the reserve used.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    mem_free = memory()['free']
    for threshold, reserve in tiers:
        if mem_free > threshold:
            # Remaining bytes divided by the footprint of n float32
            # rows/columns of length N.
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes the array to be stored occupies in memory.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in memory and compression
        is therefore deemed unnecessary.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array is large relative to the free memory
    # (memory()['free'] is expressed in KB, hence the factor of 1000).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: the filter class is 'tables.Filters'; 'tables.filters'
            # is the sub-module and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if Blosc is unavailable in this PyTables build.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Stack all membership matrices in a single call instead of growing the
    # adjacency matrix one partition at a time: repeated 'vstack' copies the
    # accumulated rows at every iteration and is quadratic in N_runs.
    blocks = [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)]
    hypergraph_adjacency = scipy.sparse.vstack(blocks, format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    The four CSR components ('data', 'indices', 'indptr' and 'shape') are
    each stored as a separate chunked array under '/consensus_group',
    replacing any same-named node already present.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
        Path to an HDF5 file that already contains a '/consensus_group' group.
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # The on-disk size of the matrix decides whether compression is worthwhile.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            try:
                # Remove any pre-existing node of the same name before rewriting.
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)

            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reconstruct a hypergraph adjacency matrix from its HDF5 representation.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        Path to an HDF5 file whose '/consensus_group' group holds the four
        CSR components written by 'store_hypergraph_adjacency'.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    # Open read-only: nothing is written here, and 'r+' would needlessly
    # fail on write-protected files.
    with tables.open_file(hdf5_file_name, 'r') as fileh:
        pars = []
        for par in ('data', 'indices', 'indptr', 'shape'):
            pars.append(getattr(fileh.root.consensus_group, par).read())

    # pars[:3] are the CSR buffers; pars[3] is the (n_rows, n_cols) shape.
    hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])

    return hypergraph_adjacency
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called by 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'

    # (Re-)create the HDF5 scratch file that will hold all large
    # intermediate arrays for the consensus functions.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA materializes a dense n_samples x n_samples similarity matrix,
    # so it is skipped for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # The hypergraph adjacency matrix is computed once and shared on disk
    # by all consensus functions.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run each consensus function and score its result by the weighted
    # average mutual information with the individual partitions.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    # Keep the consensus clustering with the highest mutual-information score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds the cluster IDs assigned by one partition; samples
        left out of a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belongs to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # A flat label vector is promoted to a one-row ensemble.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_score = 0
    total_weight = 0

    for run in cluster_runs:
        # Only samples actually clustered in this run (finite labels) count.
        observed = np.where(np.isfinite(run))[0]
        weight = observed.size

        consensus_labels = np.reshape(checkcl(cluster_ensemble[observed], verbose), newshape = weight)
        run_labels = np.reshape(checkcl(np.rint(run[observed]), verbose), newshape = weight)

        total_score += normalized_mutual_info_score(consensus_labels, run_labels) * weight
        total_weight += weight

    return float(total_score) / total_weight
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, is not effectively one-dimensional,
        or contains any NaN entry.
    """

    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # BUG FIX: the two error messages below previously read 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift negative labellings so that the smallest label becomes 0.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        # Compact the labels into a dense 0..k-1 mapping if there are gaps.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        distinct labels are replaced by consecutive integers starting at 0,
        assigned in increasing order of the original labels.
    """

    x = np.asanyarray(array_in)
    flattened = x.reshape(x.size)

    # 'np.unique' returns the sorted distinct labels together with, for each
    # entry of 'flattened', the index of its label in that sorted list --
    # exactly the dense 0..k-1 relabelling that the previous hand-rolled
    # sort-and-scan loop produced (assuming no NaN entries, as documented).
    _, result = np.unique(flattened, return_inverse = True)

    return result.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty, or contains NaN or infinite entries.

    Notes
    -----
    NOTE(review): the repairs that rebind the local name (taking real parts,
    truncating to a square sub-matrix, symmetrizing) are invisible to the
    caller, whereas the in-place assignments (clipping to [0, 1], forcing a
    unit diagonal) may mutate the caller's array -- confirm this asymmetry
    is intended.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Discard any imaginary components.
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Keep only the largest leading square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries into the valid similarity range [0, 1].
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose.
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force unit self-similarities on the diagonal.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    # Default to one more than the largest cluster ID seen across all runs.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA builds a dense N_samples x N_samples matrix; bail out early
    # when that would be prohibitively large.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    # Co-association matrix: entry (i, j) counts how many partitions placed
    # samples i and j in the same cluster (A^T A over the hypergraph).
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Validate the normalized co-association matrix (raises on NaN/inf, etc.).
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale the entries so that they sum to roughly 1e8 once rounded to
    # integers below.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        # Evaluate the scaling out-of-core, directly into the HDF5 array.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round the scaled similarities to integers, chunk by chunk to
        # bound memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    # Partition the resulting similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # Default to one more than the largest cluster ID seen across all runs.
    # Idiom fix: compare to None with 'is', not '=='.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # Default to one more than the largest cluster ID seen across all runs.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # 'w' holds each hyper-edge's size (row sums); used later as vertex
    # weights when partitioning the meta-graph.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        # Similarities are later rounded to integers; the factor of 100
        # preserves two digits of precision.
        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # A A^T gives pairwise intersection sizes between hyper-edges.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        # Row sums, i.e. the size of each hyper-edge.
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Jaccard(i, j) = |i & j| / (|i| + |j| - |i & j|), computed in
        # memory-bounded row chunks.
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # Group the hyper-edges into meta-clusters by partitioning the
    # similarity graph just built.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    # 'clb_cum' holds, per meta-cluster, the mean membership of every sample
    # across that meta-cluster's hyper-edges.
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            # Mean of the hyper-edge indicator rows belonging to this
            # meta-cluster.
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    # Detect samples with zero association to every meta-cluster; their
    # columns are replaced by random values to break ties arbitrarily.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Small random jitter added to 'clb_cum' so that ties in the arg-max
    # competition below are broken randomly.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    # Out-of-core evaluation directly into the HDF5-backed arrays.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums of 'tmp', used to normalize the columns of 'clb_cum'.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    # Normalize each column of 'clb_cum' and record the per-sample maximum
    # association, chunk by chunk.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Assign every sample to the meta-cluster achieving its maximum
    # association (the 'competition for objects').
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)

        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster labels; NaN entries denote samples not selected
        for this clustering round and end up with an all-zero column.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, with one row per
    distinct finite label (in increasing label order).

    Raises
    ------
    ValueError
        If 'cluster_run' is not effectively one-dimensional.
    """

    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    else:
        cluster_run = cluster_run.reshape(cluster_run.size)

        # NaN-tagged samples do not contribute to any row.
        cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

        # Gather each cluster's member indices first, then assemble the CSR
        # buffers in one pass: repeated 'np.append' reallocates the whole
        # index buffer at every step and is quadratic in the input size.
        members = [np.where(cluster_run == elt)[0] for elt in cluster_ids]

        indptr = np.cumsum([0] + [m.size for m in members])
        if members:
            indices = np.concatenate(members)
        else:
            indices = np.empty(0, dtype = np.int32)

        data = np.ones(indices.size, dtype = int)

        return scipy.sparse.csr_matrix((data, indices, indptr),
                                       shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)

    # Portability fix: delete the temporary graph file via the standard
    # library rather than spawning an external 'rm' process.
    os.remove(file_name)

    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the HGPA hypergraph with hMETIS and densify the labels.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix.
    N_clusters_max : int
        Number of parts requested from the partitioner.
    w : array, optional (default = None)
        Hyper-edge weights; when absent the hyper-edges are unweighted.

    Returns
    -------
    cluster_labels : array of shape (n_samples,)
        Consensus cluster label for each sample, remapped by one_to_max
        onto a dense integer range.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Unweighted hyper-edges use wgraph mode 2; weighted ones mode 3.
    if w is None:
        graph_file = wgraph(hdf5_file_name, None, 2)
    else:
        graph_file = wgraph(hdf5_file_name, w, 3)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the MCLA meta-graph with METIS and densify the labels.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the MCLA meta-graph similarities.
    N_clusters_max : int
        Number of parts requested from the partitioner.
    w : array, optional (default = None)
        Vertex weights passed through to the graph writer.

    Returns
    -------
    cluster_labels : array of shape (n_samples,)
        Consensus cluster label for each sample, remapped by one_to_max
        onto a dense integer range.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name, w, 1)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store from which the similarity or adjacency data is read.
    w : list or array, optional (default = None)
        Weights: per-vertex for method 1, per-hyper-edge for method 3.
    method : int, optional (default = 0)
        0 -> CSPA similarity graph ('wgraph_CSPA');
        1 -> MCLA meta-graph with vertex weights ('wgraph_MCLA');
        2 or 3 -> HGPA hypergraph, unweighted or weighted ('wgraph_HGPA').

    Returns
    -------
    file_name : string
        Name of the graph file written in the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA graph has no edges.
    """
    print('\n#')
    # Select the data source and output file name for the requested method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        # Transposed adjacency: rows index samples (vertices),
        # columns index clusters (hyper-edges).
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # No self-loops in the (hyper-)graph file.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # Vertex weights must be integers for the partitioner; rescale then round.
        scale_factor = 100.0
        w_sum_before = np.sum(w)  # computed but not used below
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line. The trailing flag selects the weighting scheme
        # (presumably METIS's fmt field: '1' = edge weights,
        # '11' = vertex and edge weights -- TODO confirm against METIS manual).
        if method == 0:
            # Edge count = number of strictly positive off-diagonal entries / 2.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # An edgeless graph cannot be partitioned; signal the caller.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the edges chunk-by-chunk to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # Hypergraph header: number of hyper-edges then number of vertices.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: (neighbour, weight) pairs, optionally
            # preceded by the vertex's own weight (method 1).
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One line per hyper-edge: its weight followed by its vertex list.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        # Unweighted case: use the hyper-edge's occupancy as weight.
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix is consumed here; drop it from the HDF5 store.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    The external partitioner writes its result to '<file_name>.part.<k>';
    that file is parsed, densified via one_to_max, then deleted.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the partitioner.
    file_name : string
        One of 'wgraph_CSPA', 'wgraph_MCLA', 'wgraph_HGPA', or the sentinel
        'DO_NOT_PROCESS' emitted by wgraph for an edgeless graph.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned (empty list for the 'DO_NOT_PROCESS' sentinel).
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The partitioners write their output next to the input graph file.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binary is platform-specific.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # NOTE(review): shell=True with a string command; the pieces are
        # internally generated, but confirm N_clusters_max is always an int
        # before it is interpolated into the shell line.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        # gpmetis is assumed to be on the PATH.
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    # The 'with open' mainly asserts the output file exists (raises IOError
    # otherwise); np.loadtxt re-opens the file by name on its own.
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Remap partition IDs onto a dense 0..K-1 range.
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store; the overlap matrices are appended to an extendable
        array 'overlap_matrix' under /consensus_group.
    consensus_labels : array of shape (n_samples,)
        Assumed to hold dense integer labels 0..K-1 (iteration below is
        over range(K)) -- TODO confirm at the call site.
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list of int
        [0, n_clusters(run 0), n_clusters(run 1), ...].
    mutual_info_list : list of float
        Mutual information of each run with the consensus labelling.
    consensus_adjacency : scipy.sparse.csr_matrix
        Membership indicator matrix of the consensus clustering,
        of shape (K, n_samples).
    """
    # A single flat run is promoted to a (1, n_samples) ensemble.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR ingredients of the consensus membership matrix by hand:
    # row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # NOTE: this local deliberately shadows the function's own name within
    # the body; it is the on-disk extendable array, not a recursion.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only samples with finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' is cumulative over all clusters seen so
            # far, so this union size covers samples of clusters 0..c, not just
            # cluster 'elt' -- looks suspicious for a Jaccard-style
            # denominator; confirm intent against upstream.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        # Membership indicator matrix of the current run.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        # Pairwise intersection counts between run clusters and consensus clusters.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | create_membership_matrix | python | def create_membership_matrix(cluster_run):
cluster_run = np.asanyarray(cluster_run)
if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
"problem in dimensions of the cluster label vector "
"under consideration.")
else:
cluster_run = cluster_run.reshape(cluster_run.size)
cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
indices = np.empty(0, dtype = np.int32)
indptr = np.zeros(1, dtype = np.int32)
for elt in cluster_ids:
indices = np.append(indices, np.where(cluster_run == elt)[0])
indptr = np.append(indptr, indices.size)
data = np.ones(indices.size, dtype = int)
return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size)) | For a label vector represented by cluster_run, constructs the binary
membership indicator matrix. Such matrices, when concatenated, contribute
to the adjacency matrix for a hypergraph representation of an
ensemble of clusterings.
Parameters
----------
cluster_run : array of shape (n_partitions, n_samples)
Returns
-------
An adjacnecy matrix in compressed sparse row form. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L884-L919 | null | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Report the machine's virtual-memory statistics.

    Returns
    -------
    dict
        Maps each field reported by psutil.virtual_memory() (e.g. 'total',
        'free', 'used') to its value coerced to an integer.
    """
    return {field: int(value)
            for field, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can
        fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        Exits the process when too little memory is free.
    """
    mem_free = memory()['free']
    # (free-memory threshold, safety margin withheld from the computation);
    # larger machines keep a proportionally larger reserve.
    brackets = ((60000000, 10000000),
                (40000000, 7000000),
                (14000000, 2000000),
                (8000000, 1400000),
                (2000000, 900000),
                (1000000, 400000))
    for threshold, reserve in brackets:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array about to be stored; must be positive.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in free memory and
        compression is deemed unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0
    # Compress only when the array would occupy a large fraction of free memory.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Bug fix: the PyTables constructor is the class 'tables.Filters';
            # lowercase 'tables.filters' is a module and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # NOTE(review): warnings are not raised as exceptions unless a
            # warnings filter turns them into errors -- confirm this fallback
            # to 'lzo' can actually trigger.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None
    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One row of cluster labels per independent clustering run.

    Returns
    -------
    scipy.sparse.csr_matrix
        The membership matrices of all runs stacked vertically: the
        hypergraph whose hyper-edges are the clusters of every partition.
    """
    N_runs = cluster_runs.shape[0]
    # One binary membership block per run, stacked in run order.
    blocks = [create_membership_matrix(cluster_runs[0])]
    blocks.extend(create_membership_matrix(cluster_runs[i])
                  for i in range(1, N_runs))
    if N_runs == 1:
        return blocks[0]
    return scipy.sparse.vstack(blocks, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Persist a hypergraph adjacency matrix into an HDF5 file.

    The CSR matrix is decomposed into its 'data', 'indices', 'indptr' and
    'shape' components, each written as a compressed array under
    /consensus_group, replacing any nodes left by a previous call.

    Parameters
    ----------
    hypergraph_adjacency : scipy.sparse.csr_matrix
    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    byte_counts = (hypergraph_adjacency.data.nbytes
                   + hypergraph_adjacency.indices.nbytes
                   + hypergraph_adjacency.indptr.nbytes)
    FILTERS = get_compression_filter(byte_counts)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for component in ('data', 'indices', 'indptr', 'shape'):
            # Drop any stale node from an earlier run before re-creating it.
            try:
                stale = getattr(fileh.root.consensus_group, component)
                stale._f_remove()
            except AttributeError:
                pass
            values = np.array(getattr(hypergraph_adjacency, component))
            atom = tables.Atom.from_dtype(values.dtype)
            node = fileh.create_carray(fileh.root.consensus_group, component,
                                       atom, values.shape, filters = FILTERS)
            node[:] = values
def load_hypergraph_adjacency(hdf5_file_name):
    """Read back the hypergraph adjacency matrix stored by
    store_hypergraph_adjacency.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    scipy.sparse.csr_matrix
        The matrix reassembled from its 'data', 'indices', 'indptr'
        and 'shape' components under /consensus_group.
    """
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, name).read()
                      for name in ('data', 'indices', 'indptr', 'shape')]
    return scipy.sparse.csr_matrix(tuple(components[:3]), shape = components[3])
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.
    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.
    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.
    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    # (Re-)create the HDF5 scratch store; any previous content is discarded
    # by opening in 'w' mode.
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA materializes an n_samples x n_samples similarity matrix,
    # which is impractical past ~10000 samples; skip it in that regime.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # The hypergraph adjacency must be on disk before any consensus
    # function runs; they all read it back from the HDF5 store.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    # Run every candidate consensus function and score its labelling by
    # mutual information with the ensemble's individual runs.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
    print('*****')
    # Keep the labelling of the best-scoring heuristic.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Score a consensus labelling against an ensemble of clusterings.

    Computes the average of the normalized mutual information between the
    consensus labels and each run, each run weighted by how many of its
    samples actually carry a (finite) label.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One row of cluster labels per run; NaN marks unlabelled samples.
    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus labelling to evaluate; a score of 0.0 is returned
        when it is absent.
    verbose : Boolean, optional (default = False)
        Forwarded to checkcl for its status messages.

    Returns
    -------
    float
        The labelled-count-weighted average mutual information.
    """
    if cluster_ensemble is None:
        return 0.0
    # Promote a single flat run to a (1, n_samples) ensemble.
    if cluster_runs.size == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    total_weighted_score = 0
    total_labelled = 0
    for row in range(cluster_runs.shape[0]):
        # Only samples this run actually labelled take part in the score.
        labelled = np.where(np.isfinite(cluster_runs[row]))[0]
        count = labelled.size
        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = count)
        run_part = np.reshape(checkcl(np.rint(cluster_runs[row, labelled]), verbose),
                              newshape = count)
        total_weighted_score += normalized_mutual_info_score(consensus_part, run_part) * count
        total_labelled += count
    return float(total_weighted_score) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Validate and normalise a vector of cluster labels.

    Rejects empty, non-vector or NaN-containing input; shifts negative
    labels so the minimum becomes 0; and remaps the labels onto a dense
    0..K-1 range when gaps are present.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        Cluster IDs for the samples of one clustering round.
    verbose : Boolean, optional (default = False)
        Print a status message for every adjustment made.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The (possibly modified in place) validated label vector.
    """
    cluster_run = np.asanyarray(cluster_run)
    # --- rejection guards -------------------------------------------------
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    if cluster_run.size != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    if np.isnan(cluster_run).any():
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    # --- normalisation ----------------------------------------------------
    min_label = np.amin(cluster_run)
    if min_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        # In-place shift so the smallest label becomes zero.
        cluster_run -= min_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")
    densified = one_to_max(cluster_run)
    # A differing maximum reveals gaps in the labelling.
    if np.amax(cluster_run) != np.amax(densified):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = densified
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Remap a vector of cluster labels onto the dense range 0..K-1.

    The smallest label becomes 0, the next distinct one 1, and so on;
    relative order of distinct labels is preserved. As in checkcl's usage,
    the input is assumed to contain no NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The cluster IDs to be remapped.

    Returns
    -------
    result : one-dimensional array of int
        Densified labels, aligned element-wise with the input.
    """
    flat = np.asanyarray(array_in)
    n = flat.size
    flat = flat.reshape(n)
    order = np.argsort(flat)
    ranked = flat[order]
    result = np.empty(n, dtype = int)
    dense_label = -1
    previous = np.nan
    for pos in range(n):
        # NaN sentinel guarantees the very first element opens a new label.
        if previous != ranked[pos] or np.isnan(previous):
            previous = ranked[pos]
            dense_label += 1
        # Write the dense label back at the element's original position.
        result[order[pos]] = dense_label
    return result
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.
    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Notes
    -----
    Returns None. Some repairs below mutate the caller's array in place
    (clamping, symmetrization writes via fancy indexing / item assignment),
    while others only rebind the local name ('similarities = similarities.real',
    the square-submatrix slice) and therefore do NOT propagate to the caller --
    NOTE(review): confirm this asymmetry is intentional.
    """
    # --- hard rejections --------------------------------------------------
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # --- soft repairs -------------------------------------------------
        # Complex entries: keep only the real part (local rebinding).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Non-square input: keep the largest leading square sub-matrix
        # (local rebinding).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clamp entries into [0, 1] (in-place writes).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (local rebinding).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (in-place write).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix; the CSPA
        similarity matrix is written back into it.
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest label found in cluster_runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # The dense N_samples x N_samples co-association matrix below makes
    # CSPA intractable for large sample counts.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Co-association counts: entry (i, j) is the number of hyper-edges
    # (clusters across all runs) containing both samples i and j.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validation only; the divided copy is discarded afterwards.
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the matrix total is a fixed large constant, making the
    # subsequently rounded integer edge weights meaningful for METIS.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        # Evaluate the scaling out-of-core, writing straight into the store.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round to integers chunk-by-chunk to bound memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    # Partition the similarity graph now sitting in the HDF5 store.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm consensus function.

    Delegates the actual partitioning of the ensemble's hypergraph
    to hMETIS via the hmetis wrapper.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the hypergraph adjacency matrix.
    cluster_runs : array of shape (n_partitions, n_samples)
        Used only to infer the default number of consensus clusters.
    verbose : bool, optional (default = False)
        Unused here; kept for signature parity with CSPA and MCLA.
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest label found in cluster_runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Clusters the hyper-edges of the ensemble's hypergraph into meta-clusters
    (via 'cmetis'), collapses each meta-cluster into a single meta-hyper-edge
    of mean memberships, then assigns every sample to the meta-cluster with
    which it is most strongly associated.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding the hypergraph adjacency matrix of the ensemble;
        also used as scratch space for intermediate matrices.

    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one partition's vector of cluster labels.

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Upper bound on the number of consensus clusters; inferred from the
        largest label in 'cluster_runs' if left unspecified.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # NOTE(review): prefer 'N_clusters_max is None' (left unchanged here).
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Hyper-edge weights: the size of each cluster across all partitions.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        # Similarities are rescaled then rounded to integers further down,
        # because METIS expects integral edge weights.
        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # For binary rows, A.A^T gives pairwise intersection sizes and the
        # row sums give set sizes; Jaccard = |a&b| / (|a| + |b| - |a&b|).
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Process the similarity matrix in memory-sized slabs of rows.
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            # x[r, c] = |row r|, y[r, c] = |row c| for this slab:
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # Cluster the hyper-edges themselves on the Jaccard similarity graph.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    # clb_cum[c, s]: mean membership of sample s in meta-cluster c.
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        # Rows of the adjacency matrix belonging to each meta-cluster in this slab:
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.
    # First, find samples with no association to any meta-cluster at all.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Unassociated samples receive random associations so that the
        # argmax below is well-defined for them.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Tiny random jitter added to 'clb_cum' to break ties in the argmax.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums of the jittered matrix, used to normalize each column.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    # Normalize columns so the winning entries can be read as posterior
    # probabilities, then record the per-sample maxima.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Sweep the meta-clusters (highest slab first) and record, per sample,
    # the meta-cluster achieving the maximum association.
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)

        # NOTE(review): stray leading apostrophe inside this message string
        # (left unchanged here; string literals are runtime behavior).
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary membership indicator matrix of a single label vector.

    Stacking such matrices over every partition of an ensemble yields the
    adjacency matrix of the hypergraph representing that ensemble.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    A scipy.sparse.csr_matrix of shape (n_clusters, n_samples) whose row r
    holds ones at the positions of the samples assigned to cluster r.
    """

    cluster_run = np.asanyarray(cluster_run)

    # Guard clause: the input must be flattenable to a single label vector.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")

    flat_labels = cluster_run.reshape(cluster_run.size)
    # NaN's and infinities do not define clusters of their own:
    valid_ids = np.unique(np.compress(np.isfinite(flat_labels), flat_labels))

    # One row of member positions per cluster ID, in sorted ID order.
    members_per_id = [np.where(flat_labels == cluster_id)[0] for cluster_id in valid_ids]

    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for members in members_per_id:
        indices = np.append(indices, members)
        indptr = np.append(indptr, indices.size)

    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (valid_ids.size, flat_labels.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 store holding the CSPA similarity matrix.

    N_clusters_max : int
        Number of parts requested from the graph partitioner.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    file_name = wgraph(hdf5_file_name)
    labels = sgraph(N_clusters_max, file_name)
    # Fix: 'wgraph' returns the sentinel 'DO_NOT_PROCESS' (not a graph-file
    # name) when the similarity matrix has no positive off-diagonal entry;
    # there is no file of that name to remove in that case.
    if file_name != 'DO_NOT_PROCESS':
        subprocess.call(['rm', file_name])
    return labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the HGPA hypergraph and return cluster labels ranging
    from 1 to N_clusters_max.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional hyper-edge weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    # Method code 2 writes an unweighted hypergraph file; 3, a weighted one.
    graph_mode = 2 if w is None else 3
    graph_file = wgraph(hdf5_file_name, w, graph_mode)

    labels = one_to_max(sgraph(N_clusters_max, graph_file))

    subprocess.call(['rm', graph_file])

    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the MCLA meta-clustering graph and return cluster labels
    ranging from 1 to N_clusters_max.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)
        Optional vertex weights.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Method code 1 selects the MCLA (weighted-vertex) graph format.
    graph_file = wgraph(hdf5_file_name, w, 1)

    labels = one_to_max(sgraph(N_clusters_max, graph_file))

    subprocess.call(['rm', graph_file])

    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store holding either the CSPA/MCLA similarity matrices or the
        components of the hypergraph adjacency matrix.

    w : list or array, optional (default = None)
        Vertex (method 1) or hyper-edge (method 3) weights.

    method : int, optional (default = 0)
        0 -> graph file for CSPA; 1 -> weighted-vertex graph file for MCLA;
        2 or 3 -> hypergraph file for HGPA (3 with caller-supplied weights).

    Returns
    -------
    file_name : string
        Name of the file written to the current working directory, or the
        sentinel 'DO_NOT_PROCESS' if the CSPA similarity matrix has no
        positive off-diagonal entry.
    """

    print('\n#')

    # Select the similarity data and the output file name per method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    if method in {0, 1}:
        # Self-similarities must not contribute edges to the graph:
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0

    if method == 1:
        # METIS expects integral weights; rescale before rounding.
        # NOTE(review): 'w_sum_before' is computed but never used.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # Header line: vertex count, edge count and a METIS/hMETIS format code
        # ('1' = edge weights, '11' = vertex and edge weights).
        if method == 0:
            # Each undirected edge is counted twice in the symmetric matrix:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): this early return leaves 'fileh' open and the
                # (empty) graph file behind -- TODO confirm intended cleanup.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One adjacency line per vertex: (neighbour, weight) pairs,
            # preceded by the vertex weight for method 1.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]

                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]

                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights

                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))

            # One line per hyper-edge: its weight followed by its members.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)

                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    if method in {0, 1}:
        # The zeroed-diagonal similarity matrix is no longer needed on disk.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)

    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the (hyper-)graph partitioner.

    file_name : string
        Graph file written by 'wgraph'; one of 'wgraph_CSPA', 'wgraph_MCLA',
        'wgraph_HGPA', or the sentinel 'DO_NOT_PROCESS'.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).

    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph-file names.
    """

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    out_name = file_name + '.part.' + k
    # NOTE(review): the partitioners are invoked through the shell on a string
    # built from 'N_clusters_max'; fine for trusted integer input, but never
    # pass user-controlled values here.
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        # The bundled 'shmetis' binary only exists for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # Fix: 'np.loadtxt' opens the file itself; the previous
    # 'with open(out_name)' wrapper created an unused handle and
    # shadowed the (Python 2) built-in name 'file'.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)

    subprocess.call(['rm', out_name])

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 store into which the 'overlap_matrix' EArray is written.

    consensus_labels : array of shape (n_samples,)
        Labels delivered by the consensus-clustering procedure; assumed to be
        the integers 0 .. n_consensus_clusters - 1.

    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one partition's vector of cluster labels.

    Returns
    -------
    cluster_dims_list : list
        0 followed by the number of distinct (finite, non-negative) cluster
        ID's found in each run.

    mutual_info_list : list
        One mutual-information score per run against the consensus labels.

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of the consensus clustering.
    """

    # A single label vector is promoted to a one-row ensemble.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Assemble the CSR components of the consensus membership matrix.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Only finite, non-negative labels define clusters for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                # NOTE(review): 'indices' accumulates members of all clusters
                # processed so far, so 'unions[c, k]' mixes earlier clusters'
                # members into cluster c's union -- verify this is intended.
                unions[c, k] = np.union1d(indices, x).size
            c += 1

        data = np.ones(indices.size, dtype = int)
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # Pairwise intersection sizes between this run's clusters and the
        # consensus clusters, via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | metis | python | def metis(hdf5_file_name, N_clusters_max):
file_name = wgraph(hdf5_file_name)
labels = sgraph(N_clusters_max, file_name)
subprocess.call(['rm', file_name])
return labels | METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
passed by CSPA.
Parameters
----------
hdf5_file_name : string or file handle
N_clusters_max : int
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the CSPA heuristics for consensus clustering.
Reference
---------
G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L922-L949 | [
"def wgraph(hdf5_file_name, w = None, method = 0):\n \"\"\"Write a graph file in a format apposite to later use by METIS or HMETIS.\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n w : list or array, optional (default = None)\n\n method : int, optional (default = 0)\n\n Returns\n -------\n file_name : string\n \"\"\"\n\n print('\\n#')\n\n if method == 0:\n fileh = tables.open_file(hdf5_file_name, 'r+')\n e_mat = fileh.root.consensus_group.similarities_CSPA\n file_name = 'wgraph_CSPA'\n elif method == 1:\n fileh = tables.open_file(hdf5_file_name, 'r+')\n e_mat = fileh.root.consensus_group.similarities_MCLA\n file_name = 'wgraph_MCLA'\n elif method in {2, 3}:\n hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)\n e_mat = hypergraph_adjacency.copy().transpose()\n file_name = 'wgraph_HGPA'\n fileh = tables.open_file(hdf5_file_name, 'r+')\n else:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: wgraph: \"\n \"invalid code for choice of method; \"\n \"choose either 0, 1, 2 or 3.\")\n\n if w is None:\n w = []\n\n N_rows = e_mat.shape[0]\n N_cols = e_mat.shape[1]\n\n if method in {0, 1}:\n diag_ind = np.diag_indices(N_rows)\n e_mat[diag_ind] = 0\n\n if method == 1:\n scale_factor = 100.0\n w_sum_before = np.sum(w)\n w *= scale_factor\n w = np.rint(w)\n\n with open(file_name, 'w') as file:\n print(\"INFO: Cluster_Ensembles: wgraph: writing {}.\".format(file_name))\n\n if method == 0:\n sz = float(np.sum(e_mat[:] > 0)) / 2\n if int(sz) == 0:\n return 'DO_NOT_PROCESS'\n else:\n file.write('{} {} 1\\n'.format(N_rows, int(sz)))\n elif method == 1:\n chunks_size = get_chunk_size(N_cols, 2)\n N_chunks, remainder = divmod(N_rows, chunks_size)\n if N_chunks == 0:\n sz = float(np.sum(e_mat[:] > 0)) / 2\n else:\n sz = 0\n for i in range(N_chunks):\n M = e_mat[i*chunks_size:(i+1)*chunks_size]\n sz += float(np.sum(M > 0))\n if remainder != 0:\n M = e_mat[N_chunks*chunks_size:N_rows]\n sz += float(np.sum(M > 0))\n sz = float(sz) / 2 \n 
file.write('{} {} 11\\n'.format(N_rows, int(sz)))\n else:\n file.write('{} {} 1\\n'.format(N_cols, N_rows))\n\n if method in {0, 1}:\n chunks_size = get_chunk_size(N_cols, 2)\n for i in range(0, N_rows, chunks_size):\n M = e_mat[i:min(i+chunks_size, N_rows)]\n\n for j in range(M.shape[0]):\n edges = np.where(M[j] > 0)[0]\n weights = M[j, edges]\n\n if method == 0:\n interlaced = np.zeros(2 * edges.size, dtype = int)\n # METIS and hMETIS have vertices numbering starting from 1:\n interlaced[::2] = edges + 1 \n interlaced[1::2] = weights\n else:\n interlaced = np.zeros(1 + 2 * edges.size, dtype = int)\n interlaced[0] = w[i + j]\n # METIS and hMETIS have vertices numbering starting from 1:\n interlaced[1::2] = edges + 1 \n interlaced[2::2] = weights\n\n for elt in interlaced:\n file.write('{} '.format(int(elt)))\n file.write('\\n') \n else:\n print(\"INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} \"\n \"non-zero hyper-edges.\".format(**locals()))\n\n chunks_size = get_chunk_size(N_rows, 2)\n for i in range(0, N_cols, chunks_size):\n M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())\n for j in range(M.shape[1]):\n edges = np.where(M[:, j] > 0)[0]\n if method == 2:\n weight = np.array(M[:, j].sum(), dtype = int)\n else:\n weight = w[i + j]\n # METIS and hMETIS require vertices numbering starting from 1:\n interlaced = np.append(weight, edges + 1) \n\n for elt in interlaced:\n file.write('{} '.format(int(elt)))\n file.write('\\n')\n\n if method in {0, 1}:\n fileh.remove_node(fileh.root.consensus_group, e_mat.name)\n\n fileh.close()\n\n print('#')\n\n return file_name\n",
"def sgraph(N_clusters_max, file_name):\n \"\"\"Runs METIS or hMETIS and returns the labels found by those \n (hyper-)graph partitioning algorithms.\n\n Parameters\n ----------\n N_clusters_max : int\n\n file_name : string\n\n Returns\n -------\n labels : array of shape (n_samples,)\n A vector of labels denoting the cluster to which each sample has been assigned\n as a result of any of three approximation algorithms for consensus clustering \n (either of CSPA, HGPA or MCLA).\n \"\"\"\n\n if file_name == 'DO_NOT_PROCESS':\n return []\n\n print('\\n#')\n\n k = str(N_clusters_max)\n out_name = file_name + '.part.' + k\n if file_name == 'wgraph_HGPA':\n print(\"INFO: Cluster_Ensembles: sgraph: \"\n \"calling shmetis for hypergraph partitioning.\")\n\n if sys.platform.startswith('linux'):\n shmetis_path = pkg_resources.resource_filename(__name__, \n 'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')\n elif sys.platform.startswith('darwin'):\n shmetis_path = pkg_resources.resource_filename(__name__, \n 'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')\n else:\n print(\"ERROR: Cluster_Ensembles: sgraph:\\n\"\n \"your platform is not supported. 
Some code required for graph partition \"\n \"is only available for Linux distributions and OS X.\")\n sys.exit(1)\n\n args = \"{0} ./\".format(shmetis_path) + file_name + \" \" + k + \" 15\"\n subprocess.call(args, shell = True)\n elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':\n print(\"INFO: Cluster_Ensembles: sgraph: \"\n \"calling gpmetis for graph partitioning.\")\n args = \"gpmetis ./\" + file_name + \" \" + k\n subprocess.call(args, shell = True)\n else:\n raise NameError(\"ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable \"\n \"file-name.\".format(file_name))\n\n labels = np.empty(0, dtype = int)\n with open(out_name, 'r') as file:\n print(\"INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; \"\n \"loading {}\".format(out_name))\n labels = np.loadtxt(out_name, dtype = int)\n labels = labels.reshape(labels.size)\n labels = one_to_max(labels) \n\n subprocess.call(['rm', out_name])\n\n print('#')\n\n return labels\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """

    snapshot = psutil.virtual_memory()._asdict()
    return {field: int(amount) for field, amount in snapshot.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    mem_free = memory()['free']

    # (minimum free memory in kB, safety margin in kB), roomiest tier first.
    # The first tier whose threshold is exceeded determines the chunk size.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    for threshold, margin in tiers:
        if mem_free > threshold:
            # Free memory (minus margin) in bytes, split across n float32
            # arrays of N elements per chunk row:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes the array would occupy uncompressed.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits into free memory and
        compression is deemed unnecessary.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Fix: the class is 'tables.Filters'; lowercase 'tables.filters'
            # is the PyTables submodule of the same name and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # NOTE(review): FiltersWarning is a warning category, not an
            # exception normally raised by the constructor; this fallback only
            # triggers if warnings are escalated to errors -- TODO confirm the
            # intended mechanism for falling back to 'lzo'.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Performance fix: stack all membership matrices in a single call.
    # Growing the matrix with repeated pairwise vstack's, as before, copies
    # the accumulated matrix each iteration and is quadratic in N_runs.
    hypergraph_adjacency = scipy.sparse.vstack(
        [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)],
        format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.
    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix
    hdf5_file_name : file handle or string
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)
    data_bytes = (hypergraph_adjacency.data.nbytes
                  + hypergraph_adjacency.indices.nbytes
                  + hypergraph_adjacency.indptr.nbytes)
    compression = get_compression_filter(data_bytes)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        # Persist each CSR component as its own carray under 'consensus_group'.
        for attribute in ('data', 'indices', 'indptr', 'shape'):
            # Drop any stale node of the same name before re-creating it.
            try:
                getattr(group, attribute)._f_remove()
            except AttributeError:
                pass
            values = np.array(getattr(hypergraph_adjacency, attribute))
            carray = fileh.create_carray(group, attribute,
                                         tables.Atom.from_dtype(values.dtype),
                                         values.shape, filters = compression)
            carray[:] = values
def load_hypergraph_adjacency(hdf5_file_name):
    """Read a hypergraph adjacency matrix back from its HDF5 representation.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    # The four CSR components were stored as separate nodes by
    # 'store_hypergraph_adjacency'; read them back in the same order.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        components = [getattr(fileh.root.consensus_group, name).read()
                      for name in ('data', 'indices', 'indptr', 'shape')]
    return scipy.sparse.csr_matrix(tuple(components[:3]), shape = components[3])
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Run up to three heuristic consensus functions (CSPA, HGPA and MCLA)
    and keep, as the definitive consensus clustering, the labelling whose
    average mutual information with the individual partitions is highest.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds the cluster IDs assigned by one clustering run;
        samples skipped by a run are tagged with NaN.
    hdf5_file_name : file object or string, optional (default = None)
        Handle or name of the HDF5 file used as scratch space for arrays
        too large for memory. Created as './Cluster_Ensembles.h5' if omitted.
    verbose : Boolean, optional (default = False)
        Whether the downstream consensus functions print status messages.
    N_clusters_max : int, optional
        Number of clusters for the consensus partition. Defaults inside each
        consensus function to one more than the largest ID in 'cluster_runs'.
    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        Consensus cluster ID of every sample in the whole data-set.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Start from a fresh HDF5 file holding an empty 'consensus_group'.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    # CSPA needs a dense N_samples x N_samples matrix, so it is skipped
    # for large data-sets.
    if cluster_runs.shape[1] > 10000:
        candidates = [('HGPA', HGPA), ('MCLA', MCLA)]
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        candidates = [('CSPA', CSPA), ('HGPA', HGPA), ('MCLA', MCLA)]
    store_hypergraph_adjacency(build_hypergraph_adjacency(cluster_runs),
                               hdf5_file_name)
    cluster_ensemble = []
    scores = []
    for name, consensus_function in candidates:
        labels = consensus_function(hdf5_file_name, cluster_runs, verbose, N_clusters_max)
        cluster_ensemble.append(labels)
        scores.append(ceEvalMutual(cluster_runs, labels, verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(name, scores[-1]))
        print('*****')
    # Keep the consensus labelling with the best mutual-information score.
    return cluster_ensemble[int(np.argmax(scores))]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Weighted average of the mutual information between a consensus
    labelling and each run of the ensemble, the weights being proportional
    to the fraction of samples a run actually labelled.
    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One row of cluster IDs per clustering run; samples a run did not
        cover are tagged with NaN.
    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus cluster IDs. When omitted the score is 0.0.
    verbose : Boolean, optional (default = False)
        Forwarded to 'checkcl' for its status messages.
    Returns
    -------
    unnamed variable : float
        The weighted average mutual information score.
    """
    if cluster_ensemble is None:
        return 0.0
    # A flat label vector is treated as an ensemble with a single run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    total_weighted_info = 0
    total_labelled = 0
    for run in cluster_runs:
        # Restrict the comparison to the samples this run actually labelled.
        known = np.where(np.isfinite(run))[0]
        n_known = known.size
        consensus_part = np.reshape(checkcl(cluster_ensemble[known], verbose),
                                    newshape = n_known)
        run_part = np.reshape(checkcl(np.rint(run[known]), verbose),
                              newshape = n_known)
        total_weighted_info += normalized_mutual_info_score(consensus_part, run_part) * n_known
        total_labelled += n_known
    return float(total_weighted_info) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.
    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.
    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.
    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.
    Raises
    ------
    ValueError
        If the vector is empty, not effectively one-dimensional,
        or contains any NaN.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # The product of the dimensions equals the largest dimension only for
    # effectively one-dimensional arrays.
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # BUGFIX: error message previously misspelled the function name as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        # BUGFIX: error message previously misspelled the function name as 'checkl'.
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift labels so the smallest one is zero.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Collapse any gaps in the labelling into a dense 0..K-1 mapping.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.
    Given that this function is herein always called after passing
    a vector to the function checkcl, one_to_max relies on the assumption
    that cluster_run does not contain any NaN entries.
    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.
    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities:
        equal input labels share one output label, and output labels
        run densely from 0 upward in increasing input order.
    """
    flat = np.asanyarray(array_in).reshape(-1)
    order = np.argsort(flat)
    ranks = np.sort(flat)
    # Walk the sorted values, assigning a fresh dense label each time
    # the value changes.
    previous = np.nan
    label = -1
    for pos in range(flat.size):
        value = ranks[pos]
        if np.isnan(previous) or previous != value:
            previous = value
            label += 1
        ranks[pos] = label
    # Scatter the dense labels back to the original positions.
    result = np.empty(flat.size, dtype = int)
    result[order] = ranks
    return result
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.
    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.
    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.
    """
    # NOTE(review): only the clipping and diagonal fixes below mutate the
    # caller's array in place; the realification, square-cropping and
    # symmetrization steps rebind the local name 'similarities' and thus do
    # NOT propagate to the caller. Callers in this file pass a temporary, so
    # the function effectively acts as a validator that raises on bad input.
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Discard imaginary components, if any (local rebind only).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Crop a non-square matrix to its largest leading square block
        # (local rebind only).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clip entries to the valid [0, 1] similarity range (mutates in place).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (local rebind only).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (mutates in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA materializes a dense N_samples x N_samples co-association matrix,
    # which is intractable beyond this size.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    # Entry (i, j) of 's' counts the partitions in which samples i and j
    # share a cluster (A^T A over the hypergraph adjacency).
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate the normalized co-association matrix (raises on NaN/inf/empty).
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the integer edge weights later handed to METIS sum to ~1e8.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        # Evaluate the rescaling out-of-core, writing straight into the carray.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round to integers chunk by chunk to bound memory usage.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : string or file handle
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # Idiom fix: identity comparison is the correct test against None.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    # Delegate the actual hypergraph partitioning to hMETIS via 'hmetis'.
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID in 'cluster_runs'.
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge sizes, later used as vertex weights for the meta-graph.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # Pairwise intersections of hyper-edges: (A A^T)[i, j] = |e_i & e_j|.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        # Jaccard(i, j) = |e_i & e_j| / (|e_i| + |e_j| - |e_i & e_j|),
        # computed chunk by chunk to bound memory usage.
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    # Group similar hyper-edges into meta-clusters with METIS.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    # Row k of 'clb_cum' is the mean membership indicator of all hyper-edges
    # assigned to meta-cluster k.
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Locate samples with no association to any meta-cluster.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    # Such samples get random associations so a winner can still be chosen.
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    # Small random jitter (scaled by 1/10000 below) breaks ties between
    # equally-associated meta-clusters.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    # Column sums, needed to normalize each sample's association vector.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize columns and record each sample's strongest association.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    # Assign every sample to the meta-cluster achieving its maximal entry.
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.
    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)
    Returns
    -------
    An adjacency matrix in compressed sparse row form: one row per distinct
    finite cluster ID (in increasing order), one column per sample, with a 1
    wherever the sample belongs to that cluster.
    """
    labels = np.asanyarray(cluster_run)
    # The label vector must be effectively one-dimensional.
    if reduce(operator.mul, labels.shape, 1) != max(labels.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    labels = labels.reshape(labels.size)
    # NaN-tagged samples (not covered by this run) contribute to no row.
    distinct_ids = np.unique(np.compress(np.isfinite(labels), labels))
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for cid in distinct_ids:
        indices = np.append(indices, np.where(labels == cid)[0])
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (distinct_ids.size, labels.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity
    graph passed by CSPA.
    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int
    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of every sample, as produced by the CSPA
        heuristics for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Dump the similarity graph to a METIS-formatted file, partition it,
    # then clean up the intermediate file.
    graph_file = wgraph(hdf5_file_name)
    cluster_labels = sgraph(N_clusters_max, graph_file)
    subprocess.call(['rm', graph_file])
    return cluster_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights; selects the weighted graph format.
    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of every sample, as produced by the HGPA
        approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method code 2 writes an unweighted hypergraph file, 3 a weighted one.
    if w is None:
        graph_file = wgraph(hdf5_file_name, None, 2)
    else:
        graph_file = wgraph(hdf5_file_name, w, 3)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Vertex weights for the meta-graph.
    Returns
    -------
    labels : array of shape (n_samples,)
        Cluster assignment of every sample, as produced by the MCLA
        approximation algorithm for consensus clustering.
    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 selects the MCLA similarity graph with vertex weights.
    graph_file = wgraph(hdf5_file_name, w, 1)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return cluster_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
        Vertex weights (methods 1 and 3); ignored for methods 0 and 2.
    method : int, optional (default = 0)
        0 -> CSPA similarity graph, 1 -> MCLA similarity graph with vertex
        weights, 2/3 -> HGPA hypergraph (3 with hyper-edge weights).
    Returns
    -------
    file_name : string
        Name of the graph file written, or 'DO_NOT_PROCESS' when the CSPA
        similarity matrix has no positive off-diagonal entry.
    """
    print('\n#')
    # Select the source matrix and output file name for the chosen method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    # Self-similarities must not appear as graph edges.
    if method in {0, 1}:
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
    if method == 1:
        # METIS expects integer vertex weights; rescale and round.
        scale_factor = 100.0
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line: vertex/edge counts plus the METIS format flag
        # ('1' = edge weights only, '11' = vertex and edge weights).
        if method == 0:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # No edge at all: signal the caller to skip partitioning.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count positive entries chunk-wise to bound memory usage.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: [vertex weight,] then (neighbour, weight) pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            # hMETIS format: one line per hyper-edge, weight first then members.
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The similarity matrix has served its purpose; free the HDF5 node.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.
    Parameters
    ----------
    N_clusters_max : int
    file_name : string
        One of 'wgraph_CSPA', 'wgraph_MCLA' or 'wgraph_HGPA' as produced by
        'wgraph', or 'DO_NOT_PROCESS' to skip partitioning altogether.
    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list when 'file_name' is
        'DO_NOT_PROCESS'.
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The partitioners write their result to '<input>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The bundled shmetis binaries only exist for Linux and OS X.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # Trailing '15' is a shmetis command-line parameter — presumably the
        # UBfactor (imbalance tolerance); confirm against the hMETIS manual.
        # NOTE(review): shell=True with a string command; safe here because
        # file_name is one of a few fixed literals, but would be unsafe
        # with untrusted input.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        # gpmetis is assumed to be available on the PATH.
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    # Re-map the partition IDs to a dense 0..K-1 labelling.
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Write to disk a stack of per-run overlap matrices and return summaries.

    For each clustering run, a matrix is appended to the extendable HDF5
    array '/consensus_group/overlap_matrix'; entry (c, k) holds the Jaccard
    ratio |intersection| / |union| between cluster 'c' of that run and
    consensus cluster 'k' of 'consensus_labels'. Also builds the sparse
    adjacency of the consensus clustering and the mutual information of each
    run with the consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file (opened in 'r+' mode) that must already contain the
        '/consensus_group' group.

    consensus_labels : array of shape (n_samples,)
        Consensus cluster ID per sample; assumed to be a dense 0..K-1 mapping.

    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering per row; NaN / negative entries mark unassigned samples.

    Returns
    -------
    cluster_dims_list : list of int
        [0, n_clusters(run_0), n_clusters(run_1), ...].

    mutual_info_list : list of float
        Mutual information of each run with 'consensus_labels'.

    consensus_adjacency : scipy.sparse.csr_matrix of shape (K, n_samples)
        Row k is the indicator vector of consensus cluster k.
    """
    # Promote a flat vector of labels to a single-run 2-D array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Hand-build the CSR triplet (indices, indptr, data) of the consensus
    # membership matrix: row k lists the samples assigned to consensus
    # cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # Extendable array: one (n_ids x K) block is appended per run below.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only samples with a finite, non-negative cluster ID.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            # NOTE(review): 'indices' accumulates the members of ALL clusters
            # processed so far, so the union below mixes earlier clusters
            # into cluster c's denominator — looks unintended; confirm
            # against the reference implementation before changing.
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        # Indicator matrix of this run's clusters; its product with the
        # transposed consensus adjacency counts pairwise intersections.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | hmetis | python | def hmetis(hdf5_file_name, N_clusters_max, w = None):
if w is None:
file_name = wgraph(hdf5_file_name, None, 2)
else:
file_name = wgraph(hdf5_file_name, w, 3)
labels = sgraph(N_clusters_max, file_name)
labels = one_to_max(labels)
subprocess.call(['rm', file_name])
return labels | Gives cluster labels ranging from 1 to N_clusters_max for
hypergraph partitioning required for HGPA.
Parameters
----------
hdf5_file_name : file handle or string
N_clusters_max : int
w : array, optional (default = None)
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of the HGPA approximation algorithm for consensus clustering.
Reference
---------
G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
partitioning: applications in VLSI domain"
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
Vol. 7, No. 1, pp. 69-79, 1999. | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L952-L987 | [
"def one_to_max(array_in):\n \"\"\"Alter a vector of cluster labels to a dense mapping. \n Given that this function is herein always called after passing \n a vector to the function checkcl, one_to_max relies on the assumption \n that cluster_run does not contain any NaN entries.\n\n Parameters\n ----------\n array_in : a list or one-dimensional array\n The list of cluster IDs to be processed.\n\n Returns\n -------\n result : one-dimensional array\n A massaged version of the input vector of cluster identities.\n \"\"\"\n\n x = np.asanyarray(array_in)\n N_in = x.size\n array_in = x.reshape(N_in) \n\n sorted_array = np.sort(array_in)\n sorting_indices = np.argsort(array_in)\n\n last = np.nan\n current_index = -1\n for i in range(N_in):\n if last != sorted_array[i] or np.isnan(last):\n last = sorted_array[i]\n current_index += 1\n\n sorted_array[i] = current_index\n\n result = np.empty(N_in, dtype = int)\n result[sorting_indices] = sorted_array\n\n return result\n",
"def wgraph(hdf5_file_name, w = None, method = 0):\n \"\"\"Write a graph file in a format apposite to later use by METIS or HMETIS.\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n w : list or array, optional (default = None)\n\n method : int, optional (default = 0)\n\n Returns\n -------\n file_name : string\n \"\"\"\n\n print('\\n#')\n\n if method == 0:\n fileh = tables.open_file(hdf5_file_name, 'r+')\n e_mat = fileh.root.consensus_group.similarities_CSPA\n file_name = 'wgraph_CSPA'\n elif method == 1:\n fileh = tables.open_file(hdf5_file_name, 'r+')\n e_mat = fileh.root.consensus_group.similarities_MCLA\n file_name = 'wgraph_MCLA'\n elif method in {2, 3}:\n hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)\n e_mat = hypergraph_adjacency.copy().transpose()\n file_name = 'wgraph_HGPA'\n fileh = tables.open_file(hdf5_file_name, 'r+')\n else:\n raise ValueError(\"\\nERROR: Cluster_Ensembles: wgraph: \"\n \"invalid code for choice of method; \"\n \"choose either 0, 1, 2 or 3.\")\n\n if w is None:\n w = []\n\n N_rows = e_mat.shape[0]\n N_cols = e_mat.shape[1]\n\n if method in {0, 1}:\n diag_ind = np.diag_indices(N_rows)\n e_mat[diag_ind] = 0\n\n if method == 1:\n scale_factor = 100.0\n w_sum_before = np.sum(w)\n w *= scale_factor\n w = np.rint(w)\n\n with open(file_name, 'w') as file:\n print(\"INFO: Cluster_Ensembles: wgraph: writing {}.\".format(file_name))\n\n if method == 0:\n sz = float(np.sum(e_mat[:] > 0)) / 2\n if int(sz) == 0:\n return 'DO_NOT_PROCESS'\n else:\n file.write('{} {} 1\\n'.format(N_rows, int(sz)))\n elif method == 1:\n chunks_size = get_chunk_size(N_cols, 2)\n N_chunks, remainder = divmod(N_rows, chunks_size)\n if N_chunks == 0:\n sz = float(np.sum(e_mat[:] > 0)) / 2\n else:\n sz = 0\n for i in range(N_chunks):\n M = e_mat[i*chunks_size:(i+1)*chunks_size]\n sz += float(np.sum(M > 0))\n if remainder != 0:\n M = e_mat[N_chunks*chunks_size:N_rows]\n sz += float(np.sum(M > 0))\n sz = float(sz) / 2 \n 
file.write('{} {} 11\\n'.format(N_rows, int(sz)))\n else:\n file.write('{} {} 1\\n'.format(N_cols, N_rows))\n\n if method in {0, 1}:\n chunks_size = get_chunk_size(N_cols, 2)\n for i in range(0, N_rows, chunks_size):\n M = e_mat[i:min(i+chunks_size, N_rows)]\n\n for j in range(M.shape[0]):\n edges = np.where(M[j] > 0)[0]\n weights = M[j, edges]\n\n if method == 0:\n interlaced = np.zeros(2 * edges.size, dtype = int)\n # METIS and hMETIS have vertices numbering starting from 1:\n interlaced[::2] = edges + 1 \n interlaced[1::2] = weights\n else:\n interlaced = np.zeros(1 + 2 * edges.size, dtype = int)\n interlaced[0] = w[i + j]\n # METIS and hMETIS have vertices numbering starting from 1:\n interlaced[1::2] = edges + 1 \n interlaced[2::2] = weights\n\n for elt in interlaced:\n file.write('{} '.format(int(elt)))\n file.write('\\n') \n else:\n print(\"INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} \"\n \"non-zero hyper-edges.\".format(**locals()))\n\n chunks_size = get_chunk_size(N_rows, 2)\n for i in range(0, N_cols, chunks_size):\n M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())\n for j in range(M.shape[1]):\n edges = np.where(M[:, j] > 0)[0]\n if method == 2:\n weight = np.array(M[:, j].sum(), dtype = int)\n else:\n weight = w[i + j]\n # METIS and hMETIS require vertices numbering starting from 1:\n interlaced = np.append(weight, edges + 1) \n\n for elt in interlaced:\n file.write('{} '.format(int(elt)))\n file.write('\\n')\n\n if method in {0, 1}:\n fileh.remove_node(fileh.root.consensus_group, e_mat.name)\n\n fileh.close()\n\n print('#')\n\n return file_name\n",
"def sgraph(N_clusters_max, file_name):\n \"\"\"Runs METIS or hMETIS and returns the labels found by those \n (hyper-)graph partitioning algorithms.\n\n Parameters\n ----------\n N_clusters_max : int\n\n file_name : string\n\n Returns\n -------\n labels : array of shape (n_samples,)\n A vector of labels denoting the cluster to which each sample has been assigned\n as a result of any of three approximation algorithms for consensus clustering \n (either of CSPA, HGPA or MCLA).\n \"\"\"\n\n if file_name == 'DO_NOT_PROCESS':\n return []\n\n print('\\n#')\n\n k = str(N_clusters_max)\n out_name = file_name + '.part.' + k\n if file_name == 'wgraph_HGPA':\n print(\"INFO: Cluster_Ensembles: sgraph: \"\n \"calling shmetis for hypergraph partitioning.\")\n\n if sys.platform.startswith('linux'):\n shmetis_path = pkg_resources.resource_filename(__name__, \n 'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')\n elif sys.platform.startswith('darwin'):\n shmetis_path = pkg_resources.resource_filename(__name__, \n 'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')\n else:\n print(\"ERROR: Cluster_Ensembles: sgraph:\\n\"\n \"your platform is not supported. 
Some code required for graph partition \"\n \"is only available for Linux distributions and OS X.\")\n sys.exit(1)\n\n args = \"{0} ./\".format(shmetis_path) + file_name + \" \" + k + \" 15\"\n subprocess.call(args, shell = True)\n elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':\n print(\"INFO: Cluster_Ensembles: sgraph: \"\n \"calling gpmetis for graph partitioning.\")\n args = \"gpmetis ./\" + file_name + \" \" + k\n subprocess.call(args, shell = True)\n else:\n raise NameError(\"ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable \"\n \"file-name.\".format(file_name))\n\n labels = np.empty(0, dtype = int)\n with open(out_name, 'r') as file:\n print(\"INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; \"\n \"loading {}\".format(out_name))\n labels = np.loadtxt(out_name, dtype = int)\n labels = labels.reshape(labels.size)\n labels = one_to_max(labels) \n\n subprocess.call(['rm', out_name])\n\n print('#')\n\n return labels\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Current values reported by psutil for the system's memory
        (total, free, used, and any other fields psutil exposes),
        each coerced to a plain integer.
    """
    return {field: int(value)
            for field, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit
        in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        If the machine lacks the minimum amount of free memory, an error
        is printed and the process exits with status 1.
    """
    # (free-memory floor, safety margin) pairs, from the most to the least
    # generous memory regime; the first floor exceeded decides the margin.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    mem_free = memory()['free']
    for floor, margin in tiers:
        if mem_free > floor:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that
    purpose. Compression reduces the HDF5 file size and also helps improving
    I/O efficiency for large datasets.

    Parameters
    ----------
    byte_counts : int
        Positive number of bytes the array would occupy uncompressed.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to free memory and
        compression is deemed unnecessary.
    """
    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # NOTE(review): memory()['free'] comes from psutil and is in bytes, so
    # the factor of 1000 here looks suspect — comparison preserved verbatim
    # from the original; confirm the intended units.
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Bug fix: the original called 'tables.filters(...)', which is
            # the PyTables *module* of that name and is not callable; the
            # filter-specification class is 'tables.Filters'.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back on LZO if Blosc is rejected. NOTE(review): a Warning
            # is only raised as an exception when warnings are configured as
            # errors — verify this fallback is actually reachable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """
    N_runs = cluster_runs.shape[0]

    # Build every per-run membership matrix first, then stack them in one
    # call: the previous pairwise vstack inside the loop re-copied the
    # accumulated matrix on every iteration, which is quadratic in the
    # total number of stored entries.
    blocks = [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)]

    if N_runs == 1:
        # Preserve the original single-run behavior: return the lone
        # membership matrix as-is, without any format conversion.
        return blocks[0]

    return scipy.sparse.vstack(blocks, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    The CSR matrix is decomposed into its 'data', 'indices', 'indptr' and
    'shape' members, each stored as a separate (possibly compressed) CArray
    under the '/consensus_group' node of the given HDF5 file. Components
    left over from a previous call are removed first, giving overwrite
    semantics.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
        HDF5 file already containing the '/consensus_group' group.
    """
    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Total size of the CSR buffers decides whether compression is applied.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove a stale node of the same name, if any; AttributeError
            # simply means no previous array was stored under this name.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            # Materialize the CSR component as a dense ndarray and persist
            # it with a matching HDF5 atom type.
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble the hypergraph adjacency matrix previously stored on disk.

    Reads back the 'data', 'indices', 'indptr' and 'shape' arrays saved
    under '/consensus_group' and rebuilds the CSR matrix from them.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """
    component_names = ('data', 'indices', 'indptr', 'shape')
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        data, indices, indptr, shape = [getattr(group, name).read()
                                        for name in component_names]

    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information
    score between its vector of consensus labels and the vectors of labels
    associated to each partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created (truncated if pre-existing) by this call;
        defaults to './Cluster_Ensembles.h5'.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of
        clusters encountered in the sets of independent clusterings on
        subsamples of the data-set (i.e. the maximum of the entries
        in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Create (or truncate) the scratch HDF5 file and the group under which
    # all intermediate arrays are stored.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense (n_samples x n_samples) similarity matrix, which
    # is impractical for large sample counts; skip it in that regime.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # Persist the hypergraph adjacency once; each consensus function
    # re-loads it from the HDF5 file.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run every candidate consensus function, score each result against the
    # ensemble via mutual information, and keep the best-scoring labelling.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known
    labels, the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        One clustering per row; NaN entries flag samples that were not
        selected for that particular round of clustering.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus cluster membership for every sample of the data-set.

    verbose : Boolean, optional (default = False)
        Forwarded to 'checkcl' to control status messages.

    Returns
    -------
    unnamed variable : float
        Average, weighted by the number of labelled samples per run, of the
        normalized mutual information between the consensus clustering and
        every clustering of the ensemble. Zero if no consensus is supplied.
    """
    if cluster_ensemble is None:
        return 0.0

    # A flat vector of labels is treated as a single run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    info_accumulator = 0
    labelled_total = 0
    for run in cluster_runs:
        known = np.where(np.isfinite(run))[0]
        n_known = known.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[known], verbose), newshape = n_known)
        run_part = np.reshape(checkcl(np.rint(run[known]), verbose), newshape = n_known)

        info_accumulator += normalized_mutual_info_score(consensus_part, run_part) * n_known
        labelled_total += n_known

    return float(info_accumulator) / labelled_total
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Rejects empty, multi-dimensional or NaN-containing label vectors, then
    shifts any negative labels to start at zero and densifies the labelling
    so that cluster IDs run from 0 upward without gaps.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed on standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The validated labelling, with IDs starting at zero and increasing
        by 1 without any gap left.
    """
    cluster_run = np.asanyarray(cluster_run)

    # Guard clauses: reject malformed input outright.
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    if np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")

    # Shift so the smallest label becomes zero.
    min_label = np.amin(cluster_run)
    if min_label < 0:
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                  "as cluster labellings.")
        cluster_run -= min_label
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: "
                  "offset to a minimum value of '0'.")

    # Densify: if the maximum changes under relabelling, gaps were present.
    dense_labels = one_to_max(cluster_run)
    if np.amax(cluster_run) != np.amax(dense_labels):
        if verbose:
            print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                  "labellings provided is not a dense integer mapping.")
        cluster_run = dense_labels
        if verbose:
            print("INFO: Cluster_Ensembles: checkcl: brought modification "
                  "to this vector so that its labels range "
                  "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Each distinct input value is replaced by its rank in the sorted sequence
    of unique values, so the output labels are exactly 0, 1, ..., k - 1 for
    k distinct inputs. Given that this function is herein always called
    after passing a vector to the function checkcl, it relies on the
    assumption that 'array_in' does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array of integers
        A massaged version of the input vector of cluster identities.
    """
    x = np.asanyarray(array_in)
    # Flatten first so the inverse mapping is one-dimensional regardless of
    # the input's shape (mirrors the original reshape to a vector).
    flat = x.reshape(x.size)

    # np.unique with return_inverse gives, for every element, the index of
    # its value within the sorted unique values — exactly the dense
    # relabelling the previous hand-rolled sort-and-scan loop computed,
    # but vectorized.
    _, result = np.unique(flat, return_inverse = True)

    return result.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the
        data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.

    Notes
    -----
    NOTE(review): only the clamping of out-of-range entries mutates the
    caller's array in place (fancy-index assignment). The truncation to
    real components, the square sub-matrix extraction, the symmetrization
    and the unit-diagonal fix all rebind the *local* name 'similarities'
    and are therefore invisible to the caller — confirm this is intended
    (the sole caller, CSPA, passes a temporary and discards the result).
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Drop imaginary components, if any (local rebinding; see Notes).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Keep only the largest leading square sub-matrix (local rebinding).
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clamp entries into [0, 1] — this mutation IS visible to the caller.
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose (local rebinding).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force unit self-similarities on the diagonal.
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus
    function.

    Builds a dense (n_samples x n_samples) co-association matrix from the
    hypergraph adjacency stored on disk, rescales and rounds it to integer
    edge weights, stores it as '/consensus_group/similarities_CSPA', and
    partitions the resulting graph with METIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file containing the hypergraph adjacency written by
        'store_hypergraph_adjacency'.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one plus the highest label found in 'cluster_runs'.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the CSPA heuristics for consensus clustering.

    Raises
    ------
    ValueError
        If the data-set exceeds 20000 samples (the dense similarity matrix
        would be too large).

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    # Co-association counts: entry (i, j) is the number of runs in which
    # samples i and j share a cluster.
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Validate the normalized similarities (checks may raise on NaN/inf).
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so the total edge weight is a fixed large constant before
    # rounding to the integer weights METIS requires.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        # Evaluate the rescaling out-of-core, writing directly into S.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round the stored similarities chunk by chunk to bound memory use.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Delegates the actual hypergraph partitioning to 'hmetis', after
    defaulting the target number of clusters to one plus the largest
    label observed across the ensemble.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    if N_clusters_max is None:
        N_clusters_max = 1 + int(np.nanmax(cluster_runs))

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the hypergraph adjacency matrix and used as
        scratch space for the large intermediate matrices built below.

    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the samples.

    verbose : bool, optional (default = False)
        Currently unused in this function.

    N_clusters_max : int, optional (default = None)
        Number of meta-clusters requested; defaults to the largest cluster
        ID found in 'cluster_runs', plus one.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    # NOTE(review): 'is None' would be the idiomatic test here.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Row sums of the adjacency matrix become the hyper-edge (vertex) weights
    # later handed to the graph partitioner via 'cmetis'.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        # Similarities are scaled to integers in [0, 100] because the METIS
        # graph file format written later holds integer weights.
        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # A . A^T gives pairwise intersection sizes between hyper-edges;
        # the row sums give each hyper-edge's own size.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        # Process the similarity matrix in memory-sized row blocks.
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            # Jaccard index: |A ∩ B| / (|A| + |B| - |A ∩ B|).
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    # Partition the hyper-edges themselves into meta-clusters.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                      tables.Float32Atom(), (N_consensus, N_samples),
                      'Matrix of mean memberships, forming meta-clusters',
                      filters = FILTERS)

    # Each meta-cluster's row is the mean of the hyper-edges assigned to it.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    # Find samples with no association to any meta-cluster; they get random
    # membership scores so that the argmax below is well-defined.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    # Add tiny random noise to break ties before taking the per-sample argmax.
    random_state = np.random.RandomState()

    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                  (N_consensus, N_samples), "Temporary matrix to help with "
                  "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    # Out-of-core arithmetic via PyTables expressions (results written to disk).
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Column sums used to normalize memberships into per-sample probabilities.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    # Normalize each column and track the per-sample maximum membership.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    # Assign each sample to the meta-cluster achieving its maximum membership.
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # NOTE(review): stray leading apostrophe in the message below — left
        # untouched since it is runtime output, not a comment.
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array-like of shape (n_samples,)
        A single vector of cluster labels. Non-finite entries (e.g. NaN
        markers for samples left out of a run) contribute no membership:
        their columns remain all-zero in the output.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_distinct_labels, n_samples); entry (i, j) is 1 when sample j
    carries the i-th distinct finite label.

    Raises
    ------
    ValueError
        If 'cluster_run' cannot be interpreted as a one-dimensional vector.
    """

    cluster_run = np.asanyarray(cluster_run)

    # A genuine vector has its total size equal to its largest dimension;
    # anything else (a true 2-D array, say) is rejected.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    else:
        cluster_run = cluster_run.reshape(cluster_run.size)

        # Only finite labels define rows of the indicator matrix.
        cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

        # Build the CSR structure directly: for each distinct label, append
        # the column indices of the samples carrying that label, and record
        # the running row boundary in 'indptr'.
        indices = np.empty(0, dtype = np.int32)
        indptr = np.zeros(1, dtype = np.int32)
        for elt in cluster_ids:
            indices = np.append(indices, np.where(cluster_run == elt)[0])
            indptr = np.append(indptr, indices.size)

        data = np.ones(indices.size, dtype = int)

        return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity
    graph passed by CSPA.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 file from which the METIS input graph is generated.

    N_clusters_max : int
        Number of parts requested from the graph partitioner.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Write the similarity graph to disk, partition it, then discard the
    # intermediate graph file.
    graph_file = wgraph(hdf5_file_name)
    partition_labels = sgraph(N_clusters_max, graph_file)
    subprocess.call(['rm', graph_file])

    return partition_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file from which the hMETIS input hypergraph is generated.

    N_clusters_max : int
        Number of parts requested from the hypergraph partitioner.

    w : array, optional (default = None)
        Hyper-edge weights; when provided, the weighted graph-file
        format is selected.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the HGPA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    # Method code 2 = unweighted hypergraph, 3 = weighted hypergraph.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)

    labels = one_to_max(sgraph(N_clusters_max, graph_file))

    subprocess.call(['rm', graph_file])

    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file from which the METIS input graph is generated.

    N_clusters_max : int
        Number of parts requested from the graph partitioner.

    w : array, optional (default = None)
        Vertex weights forwarded to the graph-file writer.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs".
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    # Method code 1 selects the weighted MCLA graph-file format in 'wgraph'.
    file_name = wgraph(hdf5_file_name, w, 1)
    labels = sgraph(N_clusters_max, file_name)
    # 'one_to_max' canonicalizes the partitioner's label IDs (helper defined
    # elsewhere in this module).
    labels = one_to_max(labels)
    # The intermediate graph file is no longer needed once partitioned.
    subprocess.call(['rm', file_name])

    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file holding the matrix to serialize (CSPA or MCLA similarities,
        or the hypergraph adjacency for HGPA).

    w : list or array, optional (default = None)
        Vertex weights; required by methods 1 and 3, ignored otherwise.

    method : int, optional (default = 0)
        0: CSPA similarity graph; 1: MCLA weighted similarity graph;
        2: unweighted hypergraph (HGPA); 3: weighted hypergraph.

    Returns
    -------
    file_name : string
        Name of the graph file written to the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA similarity matrix has no
        positive off-diagonal entries.
    """

    print('\n#')

    # Select the source matrix and output file name from the method code.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    # Self-similarities would become self-loops in the graph; zero them out.
    if method in {0, 1}:
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0

    if method == 1:
        # Weights are scaled and rounded because the METIS file format holds
        # integer weights.
        scale_factor = 100.0
        # NOTE(review): 'w_sum_before' is computed but never used afterwards.
        w_sum_before = np.sum(w)
        w *= scale_factor
        w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        # First line of a METIS graph file: vertex count, edge count, format
        # flag ('1' = edge weights, '11' = vertex and edge weights).
        if method == 0:
            # Each undirected edge is counted once, hence the division by two.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): this early return leaves 'fileh' open — the
                # closing code at the bottom of the function is skipped.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the positive entries in memory-sized row blocks.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: hyper-edge count then vertex count.
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            # One line per vertex: interleaved (neighbour, weight) pairs,
            # preceded by the vertex weight for method 1.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))

            # One line per hyper-edge: its weight followed by member vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    # The (possibly diagonal-zeroed) similarity matrix is no longer needed.
    if method in {0, 1}:
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the external partitioner.

    file_name : string
        Name of the graph file written by 'wgraph'; must be one of
        'wgraph_CSPA', 'wgraph_MCLA' or 'wgraph_HGPA', or the sentinel
        'DO_NOT_PROCESS'.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA). An empty list if the sentinel
        'DO_NOT_PROCESS' was passed in.
    """

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    # METIS/hMETIS write their result to '<input>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        # Locate the bundled shmetis binary for the current platform.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                               'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        # NOTE(review): command built by string concatenation and run with
        # shell=True — acceptable here only because every piece is generated
        # internally; a list-argument call with shell=False would be safer.
        # The trailing '15' is presumably shmetis's UBfactor argument — TODO
        # confirm against the hMETIS manual.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        # gpmetis is expected to be available on the PATH.
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    labels = np.empty(0, dtype = int)
    # NOTE(review): the handle opened here is unused — np.loadtxt re-opens
    # 'out_name' by path; the 'with' block only guards the file's existence.
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)

    labels = one_to_max(labels)

    # Remove the partitioner's output file once its contents are loaded.
    subprocess.call(['rm', out_name])

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 file in which the 'overlap_matrix' extendable array is created.

    consensus_labels : array of shape (n_samples,)
        Labels from the consensus clustering; assumed to take the values
        0 .. n_consensus_labels - 1 (the loop below iterates over exactly
        that range).

    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the samples; a single vector is
        accepted and promoted to one row.

    Returns
    -------
    cluster_dims_list : list
        0 followed by the number of distinct valid cluster IDs in each run.

    mutual_info_list : list
        One mutual-information score per run (via 'ceEvalMutual') between
        that run and the consensus labels.

    consensus_adjacency : compressed sparse row matrix
        Binary membership matrix of the consensus clustering, of shape
        (n_consensus_labels, n_samples).
    """

    # Promote a bare label vector to a one-row ensemble.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build the CSR structure of the consensus membership matrix: for each
    # consensus label k, the indices of the samples assigned to it.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # Extendable array: one block of rows is appended per run below.
    # (The local name shadows this function's own name — left as-is.)
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])

        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)

        # Union sizes between each of this run's clusters and each consensus
        # cluster; intersections are computed separately via sparse algebra.
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1

        data = np.ones(indices.size, dtype = int)

        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # I . A^T counts samples shared by each (run-cluster, consensus-cluster)
        # pair; intersection over union gives the Jaccard-style overlap.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | wgraph | python | def wgraph(hdf5_file_name, w = None, method = 0):
print('\n#')
if method == 0:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_CSPA
file_name = 'wgraph_CSPA'
elif method == 1:
fileh = tables.open_file(hdf5_file_name, 'r+')
e_mat = fileh.root.consensus_group.similarities_MCLA
file_name = 'wgraph_MCLA'
elif method in {2, 3}:
hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
e_mat = hypergraph_adjacency.copy().transpose()
file_name = 'wgraph_HGPA'
fileh = tables.open_file(hdf5_file_name, 'r+')
else:
raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
"invalid code for choice of method; "
"choose either 0, 1, 2 or 3.")
if w is None:
w = []
N_rows = e_mat.shape[0]
N_cols = e_mat.shape[1]
if method in {0, 1}:
diag_ind = np.diag_indices(N_rows)
e_mat[diag_ind] = 0
if method == 1:
scale_factor = 100.0
w_sum_before = np.sum(w)
w *= scale_factor
w = np.rint(w)
with open(file_name, 'w') as file:
print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
if method == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
if int(sz) == 0:
return 'DO_NOT_PROCESS'
else:
file.write('{} {} 1\n'.format(N_rows, int(sz)))
elif method == 1:
chunks_size = get_chunk_size(N_cols, 2)
N_chunks, remainder = divmod(N_rows, chunks_size)
if N_chunks == 0:
sz = float(np.sum(e_mat[:] > 0)) / 2
else:
sz = 0
for i in range(N_chunks):
M = e_mat[i*chunks_size:(i+1)*chunks_size]
sz += float(np.sum(M > 0))
if remainder != 0:
M = e_mat[N_chunks*chunks_size:N_rows]
sz += float(np.sum(M > 0))
sz = float(sz) / 2
file.write('{} {} 11\n'.format(N_rows, int(sz)))
else:
file.write('{} {} 1\n'.format(N_cols, N_rows))
if method in {0, 1}:
chunks_size = get_chunk_size(N_cols, 2)
for i in range(0, N_rows, chunks_size):
M = e_mat[i:min(i+chunks_size, N_rows)]
for j in range(M.shape[0]):
edges = np.where(M[j] > 0)[0]
weights = M[j, edges]
if method == 0:
interlaced = np.zeros(2 * edges.size, dtype = int)
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[::2] = edges + 1
interlaced[1::2] = weights
else:
interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
interlaced[0] = w[i + j]
# METIS and hMETIS have vertices numbering starting from 1:
interlaced[1::2] = edges + 1
interlaced[2::2] = weights
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
else:
print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
"non-zero hyper-edges.".format(**locals()))
chunks_size = get_chunk_size(N_rows, 2)
for i in range(0, N_cols, chunks_size):
M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
for j in range(M.shape[1]):
edges = np.where(M[:, j] > 0)[0]
if method == 2:
weight = np.array(M[:, j].sum(), dtype = int)
else:
weight = w[i + j]
# METIS and hMETIS require vertices numbering starting from 1:
interlaced = np.append(weight, edges + 1)
for elt in interlaced:
file.write('{} '.format(int(elt)))
file.write('\n')
if method in {0, 1}:
fileh.remove_node(fileh.root.consensus_group, e_mat.name)
fileh.close()
print('#')
return file_name | Write a graph file in a format apposite to later use by METIS or HMETIS.
Parameters
----------
hdf5_file_name : file handle or string
w : list or array, optional (default = None)
method : int, optional (default = 0)
Returns
-------
file_name : string | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1024-L1154 | [
"def load_hypergraph_adjacency(hdf5_file_name):\n \"\"\"\n\n Parameters\n ----------\n hdf5_file_name : file handle or string\n\n Returns\n -------\n hypergraph_adjacency : compressed sparse row matrix\n \"\"\"\n\n with tables.open_file(hdf5_file_name, 'r+') as fileh:\n pars = []\n for par in ('data', 'indices', 'indptr', 'shape'):\n pars.append(getattr(fileh.root.consensus_group, par).read())\n\n hypergraph_adjacency = scipy.sparse.csr_matrix(tuple(pars[:3]), shape = pars[3])\n\n return hypergraph_adjacency\n",
"def get_chunk_size(N, n):\n \"\"\"Given a two-dimensional array with a dimension of size 'N', \n determine the number of rows or columns that can fit into memory.\n\n Parameters\n ----------\n N : int\n The size of one of the dimensions of a two-dimensional array. \n\n n : int\n The number of arrays of size 'N' times 'chunk_size' that can fit in memory.\n\n Returns\n -------\n chunk_size : int\n The size of the dimension orthogonal to the one of size 'N'. \n \"\"\"\n\n mem_free = memory()['free']\n if mem_free > 60000000:\n chunk_size = int(((mem_free - 10000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 40000000:\n chunk_size = int(((mem_free - 7000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 14000000:\n chunk_size = int(((mem_free - 2000000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 8000000:\n chunk_size = int(((mem_free - 1400000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 2000000:\n chunk_size = int(((mem_free - 900000) * 1000) / (4 * n * N))\n return chunk_size\n elif mem_free > 1000000:\n chunk_size = int(((mem_free - 400000) * 1000) / (4 * n * N))\n return chunk_size\n else:\n print(\"\\nERROR: Cluster_Ensembles: get_chunk_size: \"\n \"this machine does not have enough free memory resources \"\n \"to perform ensemble clustering.\\n\")\n sys.exit(1)\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the system.
    """

    # Coerce every psutil virtual-memory field to a plain int.
    return {key: int(value)
            for key, value in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
        The program exits (status 1) if less than roughly 1 GB is free.
    """

    # Tiers of (minimum free memory, amount held back in reserve), both in
    # the units reported by psutil ('free' is in bytes, so the '* 1000'
    # below converts the headroom before dividing by the 4-byte row cost).
    # Scanned from the largest tier down; replaces the previous copy-pasted
    # six-branch if/elif ladder with identical arithmetic.
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )

    mem_free = memory()['free']
    for threshold, reserve in tiers:
        if mem_free > threshold:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Estimated size in bytes of the array to be stored; must be positive.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array comfortably fits in free memory and compression
        is therefore skipped.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only when the array is large relative to available memory
    # ('free' is reported in KB-scale units by 'memory()', hence the 1000).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUG FIX: the class is 'tables.Filters'; the previous lowercase
            # 'tables.filters' names the submodule and is not callable.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if blosc is unavailable. NOTE(review): a
            # warning is only raised as an exception if warnings are
            # escalated to errors — TODO confirm this path is reachable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row is one clustering of the samples.

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Build every membership matrix first and stack them in a single call:
    # the previous per-iteration scipy.sparse.vstack copied the accumulated
    # matrix on every pass, giving quadratic cost in the number of runs.
    membership_matrices = [create_membership_matrix(cluster_runs[i])
                           for i in range(N_runs)]
    hypergraph_adjacency = scipy.sparse.vstack(membership_matrices,
                                               format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk to disk in an HDF5 data structure.

    The four CSR components ('data', 'indices', 'indptr', 'shape') are stored
    as separate compressed arrays under the '/consensus_group' node, replacing
    any previously stored components; 'load_hypergraph_adjacency' reverses
    this operation.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
        Existing HDF5 file already containing a '/consensus_group' node.
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Size of the three CSR component arrays, used to decide on compression.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            # Remove a previously stored component of the same name, if any.
            try:
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            # Store the component as a compressed chunked array whose atom
            # matches the component's dtype.
            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble the hypergraph adjacency matrix previously written to disk
    by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    # Read the four stored CSR components back from '/consensus_group'.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        data, indices, indptr, shape = (
            getattr(group, name).read()
            for name in ('data', 'indices', 'indptr', 'shape'))

    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # (Re)create the HDF5 store and the group under which all intermediate
    # matrices produced by the consensus functions will be kept.
    # NOTE(review): mode 'w' truncates any pre-existing file of that name.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()
    cluster_ensemble = []
    score = np.empty(0)
    # CSPA materializes a dense n_samples x n_samples similarity matrix,
    # which becomes impractical for large data-sets; skip it in that case.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']
    # The hypergraph adjacency matrix is shared by all consensus functions:
    # build it once and persist it in the HDF5 store.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its weighted average mutual
        # information with the individual runs of the ensemble.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')
    # Keep the consensus labelling with the best mutual-information score.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row holds the cluster IDs assigned by one clustering round;
        samples not selected for that round are tagged with NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The consensus cluster ID of each sample of the whole data-set.

    verbose : Boolean, optional (default = False)
        Whether status messages are displayed on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between the consensus
        clustering and the runs of the ensemble.
    """
    if cluster_ensemble is None:
        return 0.0
    # Promote an effectively one-dimensional array to a single-row matrix.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    total_mutual_information = 0
    total_labelled = 0
    for run in cluster_runs:
        # Only samples actually clustered in this run (finite labels) count.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size
        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose),
                              newshape = n_labelled)
        similarity = normalized_mutual_info_score(consensus_part, run_part)
        # Weight each run by how many samples it actually labelled.
        total_mutual_information += similarity * n_labelled
        total_labelled += n_labelled
    return float(total_mutual_information) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    The vector is coerced so that cluster IDs start at zero and increase
    by 1 without any gap left: negative labels are offset in place and a
    sparse labelling is densified through 'one_to_max'.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector, possibly modified in place, with labels ranging
        from 0 to the number of distinct clusters minus one.

    Raises
    ------
    ValueError
        If the input is empty, not effectively one-dimensional, or
        contains at least one NaN entry.
    """
    cluster_run = np.asanyarray(cluster_run)
    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # A vector (possibly with singleton axes) is the only shape for which
    # the product of the dimensions equals the largest dimension.
    # Fixed: the two error messages below used to misspell this function's
    # name as 'checkl'.
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            # Offset in place so that the smallest label becomes 0.
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        x = one_to_max(cluster_run)
        # A gap-free labelling keeps the same maximum after densification;
        # otherwise adopt the dense relabelling produced by 'one_to_max'.
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))
    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    Labels are replaced by their rank in sorted order, so the output values
    run from 0 upward without gaps while preserving the relative ordering
    of the distinct input labels. Relies on the assumption (guaranteed by
    'checkcl', which calls this routine) that the input holds no NaN.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """
    flat = np.asanyarray(array_in).reshape(-1)
    ranks = np.empty(flat.size, dtype = int)
    label = -1
    previous = np.nan
    # Walk the elements in sorted order, bumping the dense label each time
    # a new distinct value is encountered.
    for position in np.argsort(flat):
        value = flat[position]
        if np.isnan(previous) or previous != value:
            label += 1
            previous = value
        ranks[position] = label
    return ranks
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    NOTE(review): some of the repairs below rebind the local name
    'similarities' (taking real parts, cropping to the largest square
    sub-matrix, symmetrizing) and therefore do NOT propagate to the caller,
    whereas the fancy-indexing assignments (clipping out-of-range entries,
    resetting the diagonal) mutate the input array in place. Callers such
    as CSPA discard the result and mainly rely on the validation
    (exception-raising) side of this routine.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.
    """
    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Discard imaginary components (local rebinding, see NOTE above).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")
            similarities = similarities.real
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")
        # Crop a rectangular matrix to its largest leading square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        # Clip entries outside [0, 1] (in-place mutation of the input).
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")
            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")
        # Symmetrize by averaging with the transpose (local rebinding).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")
            similarities = np.divide(similarities + np.transpose(similarities), 2.0)
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
        # Force unit self-similarities on the diagonal (in-place mutation).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")
            similarities[np.diag_indices(similarities.shape[0])] = 1
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.

    Builds the dense co-association matrix of the ensemble (how often two
    samples share a cluster across runs), persists a scaled integer version
    of it into the HDF5 store, and hands it to METIS for graph partitioning.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # The co-association matrix below is dense and N_samples x N_samples.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # H^T * H counts, for each pair of samples, the number of runs in which
    # they were assigned to the same cluster.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate s / N_runs as a similarity matrix (return value discarded;
    # 'checks' is used here for its validation side only).
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so that the matrix sums to a fixed total, then round to
    # integers as required by the METIS graph file format.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round chunk by chunk to keep peak memory bounded.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Thin wrapper that determines the target number of clusters and
    delegates the hypergraph partitioning to 'hmetis'.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    cluster_runs: array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    if N_clusters_max is None:
        # Default to the largest cluster ID seen across all runs, plus one.
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Clusters the hyper-edges (one per cluster of each run) by their pairwise
    Jaccard similarity, collapses each resulting meta-cluster into a mean
    membership vector, then assigns every sample to the meta-cluster with
    which it is most associated.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # Hyper-edge weights: the size of each cluster across the ensemble.
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                                'similarities_MCLA', tables.Float32Atom(),
                                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                                "similarity scores", filters = FILTERS)
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # Intersection sizes of every pair of hyper-edges.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            # Jaccard = |A & B| / (|A| + |B| - |A & B|), scaled and rounded
            # to integers for the METIS graph file format.
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    # Partition the hyper-edges into meta-clusters with METIS.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    # Collapse each meta-cluster into the mean of its member hyper-edges.
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Find samples with zero association to every meta-cluster.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Give orphan samples random associations so ties can be broken.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    # Fill 'tmp' with random noise, chunk by chunk.
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    # Add a tiny random jitter to break exact ties between meta-clusters.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Column sums used to normalize associations into probabilities.
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize columns and record, per sample, the strongest association.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    # Assign each sample to the meta-cluster achieving its maximum entry.
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        # NOTE(review): the leading apostrophe inside the next message looks
        # like a typo, but it is part of the emitted string and is kept as-is.
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    An adjacency matrix in compressed sparse row form, with one row per
    distinct (finite) cluster ID and one column per sample.
    """
    cluster_run = np.asanyarray(cluster_run)
    # Reject anything that is not effectively a one-dimensional vector.
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Only finite entries define clusters; NaN marks unselected samples.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Assemble the CSR components row by row: one row per cluster ID,
    # its columns being the samples assigned to that cluster.
    indices = np.empty(0, dtype = np.int32)
    indptr = np.zeros(1, dtype = np.int32)
    for cluster_id in cluster_ids:
        members = np.where(cluster_run == cluster_id)[0]
        indices = np.append(indices, members)
        indptr = np.append(indptr, indices.size)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """METIS algorithm by Karypis and Kumar. Partitions the induced similarity graph
    passed by CSPA.

    Writes the graph file, runs the external partitioner through 'sgraph',
    and removes the intermediate graph file afterwards.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    partition_labels = sgraph(N_clusters_max, graph_file)
    # Clean up the intermediate graph file written by 'wgraph'.
    subprocess.call(['rm', graph_file])
    return partition_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Gives cluster labels ranging from 1 to N_clusters_max for
    hypergraph partitioning required for HGPA.

    Writes the hypergraph file (unweighted when 'w' is None, weighted
    otherwise), runs the external partitioner through 'sgraph', densifies
    the labels and removes the intermediate graph file.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # Method 2 emits an unweighted hypergraph, method 3 a weighted one.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Returns cluster labellings ranging from 1 to N_clusters_max
    for hypergraph partitioning involved in MCLA.

    Writes the weighted meta-clustering graph (wgraph method 1), runs the
    external partitioner through 'sgraph', densifies the labels and removes
    the intermediate graph file.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name, w, 1)
    labels = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    method 0: CSPA similarity graph (unweighted vertices).
    method 1: MCLA Jaccard graph with vertex weights 'w'.
    methods 2 and 3: HGPA hypergraph, unweighted or with hyper-edge
    weights 'w' respectively.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).
        NOTE(review): for method 1 the code applies 'w *= scale_factor',
        which assumes 'w' supports in-place scalar multiplication
        (i.e. a NumPy array, as passed by MCLA via cmetis) — a plain
        list would fail here; confirm against callers.

    method : int, optional (default = 0)

    Returns
    -------
    file_name : string
        The name of the graph file written, or the sentinel
        'DO_NOT_PROCESS' when the CSPA similarity matrix has no
        positive off-diagonal entry.
    """
    print('\n#')
    # Select the matrix to serialize and the output file name per method.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # METIS graphs carry no self-loops: zero out the diagonal.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
        if method == 1:
            # Scale and round vertex weights to integers for the file format.
            scale_factor = 100.0
            w_sum_before = np.sum(w)
            w *= scale_factor
            w = np.rint(w)
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line: vertex/edge counts plus the METIS format code.
        if method == 0:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # NOTE(review): this early return leaves 'fileh' open and the
                # graph file containing only whatever was written so far;
                # callers treat the sentinel by skipping processing.
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count positive entries chunk by chunk to bound memory use.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: (weight,) then edge/weight pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One line per hyper-edge: its weight then its member vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The serialized similarity matrix is no longer needed in the store.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    The partitioner is dispatched on the graph file's name: 'wgraph_HGPA'
    goes to the bundled 'shmetis' binary, while 'wgraph_CSPA' and
    'wgraph_MCLA' go to an external 'gpmetis' expected on the PATH.

    Parameters
    ----------
    N_clusters_max : int

    file_name : string

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).

    Raises
    ------
    NameError
        If 'file_name' is not one of the recognized graph file names.
    """
    # Sentinel emitted by 'wgraph' when there was nothing to partition.
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # METIS writes its partition vector to '<graph>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        # The shmetis binary is shipped with the package per platform.
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # NOTE(review): command built as a string with shell=True; inputs are
        # internal constants here, but this would be unsafe for external data.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    labels = np.empty(0, dtype = int)
    # NOTE(review): the 'file' handle is opened but np.loadtxt re-reads the
    # file by name; the with-block effectively only checks the file exists.
    with open(out_name, 'r') as file:
        print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
              "loading {}".format(out_name))
        labels = np.loadtxt(out_name, dtype = int)
        labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    # Remove the partition file produced by the external binary.
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        Opened here in 'r+' mode; the overlaps are appended row-block by
        row-block to an extendable array '/consensus_group/overlap_matrix'.

    consensus_labels : array of shape (n_samples,)

    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list of int
        Starts with a leading 0, followed by the number of distinct valid
        (finite, non-negative) cluster IDs found in each run.

    mutual_info_list : list of float
        ceEvalMutual score of each run against 'consensus_labels'.

    consensus_adjacency : scipy.sparse.csr_matrix
        Binary membership matrix of the consensus clustering, of shape
        (n_consensus_clusters, n_samples).
    """

    # Promote a flat label vector to a single-run, two-dimensional array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build a CSR membership matrix for the consensus labelling:
    # row k lists the sample indices assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)

    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)

    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only samples whose label in this run is finite and >= 0
        # (NaN marks samples left out of this round of clustering).
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)

        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size

        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        # Build, cluster by cluster, the CSR structure of this run's
        # membership matrix while accumulating union sizes against every
        # consensus cluster.
        indices = np.empty(0, dtype = int)
        indptr = [0]

        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)

            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                # NOTE(review): 'indices' here is the cumulative index set of
                # all clusters processed so far, not just the current cluster
                # 'elt'; the union sizes for c > 0 therefore include earlier
                # clusters' samples. Verify against the Jaccard intent.
                unions[c, k] = np.union1d(indices, x).size

            c += 1

        data = np.ones(indices.size, dtype = int)

        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        # Pairwise intersection counts between this run's clusters and the
        # consensus clusters, via a sparse matrix product.
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | sgraph | python | def sgraph(N_clusters_max, file_name):
if file_name == 'DO_NOT_PROCESS':
return []
print('\n#')
k = str(N_clusters_max)
out_name = file_name + '.part.' + k
if file_name == 'wgraph_HGPA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling shmetis for hypergraph partitioning.")
if sys.platform.startswith('linux'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
elif sys.platform.startswith('darwin'):
shmetis_path = pkg_resources.resource_filename(__name__,
'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
else:
print("ERROR: Cluster_Ensembles: sgraph:\n"
"your platform is not supported. Some code required for graph partition "
"is only available for Linux distributions and OS X.")
sys.exit(1)
args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
subprocess.call(args, shell = True)
elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
print("INFO: Cluster_Ensembles: sgraph: "
"calling gpmetis for graph partitioning.")
args = "gpmetis ./" + file_name + " " + k
subprocess.call(args, shell = True)
else:
raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
"file-name.".format(file_name))
labels = np.empty(0, dtype = int)
with open(out_name, 'r') as file:
print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
"loading {}".format(out_name))
labels = np.loadtxt(out_name, dtype = int)
labels = labels.reshape(labels.size)
labels = one_to_max(labels)
subprocess.call(['rm', out_name])
print('#')
return labels | Runs METIS or hMETIS and returns the labels found by those
(hyper-)graph partitioning algorithms.
Parameters
----------
N_clusters_max : int
file_name : string
Returns
-------
labels : array of shape (n_samples,)
A vector of labels denoting the cluster to which each sample has been assigned
as a result of any of three approximation algorithms for consensus clustering
(either of CSPA, HGPA or MCLA). | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1157-L1221 | [
"def one_to_max(array_in):\n \"\"\"Alter a vector of cluster labels to a dense mapping. \n Given that this function is herein always called after passing \n a vector to the function checkcl, one_to_max relies on the assumption \n that cluster_run does not contain any NaN entries.\n\n Parameters\n ----------\n array_in : a list or one-dimensional array\n The list of cluster IDs to be processed.\n\n Returns\n -------\n result : one-dimensional array\n A massaged version of the input vector of cluster identities.\n \"\"\"\n\n x = np.asanyarray(array_in)\n N_in = x.size\n array_in = x.reshape(N_in) \n\n sorted_array = np.sort(array_in)\n sorting_indices = np.argsort(array_in)\n\n last = np.nan\n current_index = -1\n for i in range(N_in):\n if last != sorted_array[i] or np.isnan(last):\n last = sorted_array[i]\n current_index += 1\n\n sorted_array[i] = current_index\n\n result = np.empty(N_in, dtype = int)\n result[sorting_indices] = sorted_array\n\n return result\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Report the machine's virtual-memory statistics.

    Returns
    -------
    mem_info : dict
        Maps every field exposed by psutil.virtual_memory() (e.g. 'total',
        'free', 'used') to its value cast to an integer.
    """

    stats = psutil.virtual_memory()._asdict()
    return {field: int(value) for field, value in stats.items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit
        in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    mem_free = memory()['free']

    # Tiers of (free-memory floor, head-room to leave untouched); the first
    # tier whose floor is exceeded determines the reserve subtracted before
    # computing how many float32 rows of n side-by-side arrays fit.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    for floor, reserve in tiers:
        if mem_free > floor:
            return int(((mem_free - reserve) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that
    purpose. Compression reduces the HDF5 file size and also helps improving
    I/O efficiency for large datasets.

    Parameters
    ----------
    byte_counts : int
        Size in bytes of the array to be stored.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to the available memory and
        compression is not worth the overhead.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Only compress when the array is large compared to the free memory
    # ('free' comes from psutil, hence the factor of 1000 unit conversion).
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # Bug fix: the PyTables filter class is 'tables.Filters'
            # (capitalized); 'tables.filters' is a module object and calling
            # it raised a TypeError whenever this branch was taken.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if the Blosc configuration is rejected.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix
    representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    N_runs = cluster_runs.shape[0]

    # Collect all membership matrices first and stack them once: repeatedly
    # calling scipy.sparse.vstack inside the loop re-copied the growing
    # matrix on every iteration (quadratic in the number of runs).
    blocks = [create_membership_matrix(cluster_runs[i]) for i in range(N_runs)]
    hypergraph_adjacency = scipy.sparse.vstack(blocks, format = 'csr')

    return hypergraph_adjacency
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write a hypergraph adjacency matrix to disk in an HDF5 data structure.

    The four CSR components ('data', 'indices', 'indptr', 'shape') are each
    stored as a compressed array under '/consensus_group', replacing any
    previously stored components of the same name.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
        Path of an existing HDF5 file that already contains the
        '/consensus_group' group; it is opened in 'r+' mode.
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            try:
                # Drop a stale node of the same name, if any, before
                # re-creating it; a missing node raises AttributeError.
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble from disk the hypergraph adjacency matrix previously
    stored by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        # Read back the four CSR components in the same order they were
        # written out.
        components = [getattr(fileh.root.consensus_group, attr).read()
                      for attr in ('data', 'indices', 'indptr', 'shape')]

    data, indices, indptr, shape = components

    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information
    score between its vector of consensus labels and the vectors of labels
    associated to each partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of
        clusters encountered in the sets of independent clusterings on
        subsamples of the data-set (i.e. the maximum of the entries
        in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'

    # Create (or truncate) the HDF5 scratch file shared by all three
    # consensus functions.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA builds a dense n_samples x n_samples similarity matrix, which is
    # impractical beyond ~10000 samples; skip it for large data-sets.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    # Run every applicable consensus function and keep the labelling whose
    # weighted mutual information with the ensemble is highest.
    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known
    labels, the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Cluster IDs per run; samples skipped in a run are tagged with NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        Consensus labels for the whole data-set.

    verbose : Boolean, optional (default = False)
        Forwarded to 'checkcl' for status messages.

    Returns
    -------
    float
        The label-count-weighted mean of the normalized mutual information
        between the consensus clustering and each run of the ensemble.
    """

    if cluster_ensemble is None:
        return 0.0

    # A flat vector of labels counts as a single run.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    weighted_sum = 0
    total_labelled = 0

    for run in cluster_runs:
        # Only samples actually clustered in this run contribute.
        labelled = np.where(np.isfinite(run))[0]
        n_labelled = labelled.size

        consensus_part = np.reshape(checkcl(cluster_ensemble[labelled], verbose),
                                    newshape = n_labelled)
        run_part = np.reshape(checkcl(np.rint(run[labelled]), verbose),
                              newshape = n_labelled)

        nmi = normalized_mutual_info_score(consensus_part, run_part)

        weighted_sum += nmi * n_labelled
        total_labelled += n_labelled

    return float(weighted_sum) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster
        IDs starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, not effectively one-dimensional, or
        contains NaN entries.
    """

    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        # The product of the dimensions must equal the largest dimension,
        # i.e. the array must be a vector (possibly with singleton axes).
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")

            # NOTE(review): for a writable ndarray input this in-place
            # subtraction also mutates the caller's array.
            cluster_run -= min_label

            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")

        x = one_to_max(cluster_run)
        # If the maximum changed, the labels were not a dense 0..K-1 mapping;
        # adopt the densified relabelling instead.
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")

            cluster_run = x

            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Alter a vector of cluster labels to a dense mapping.

    The distinct labels are ranked by their sorted order and each entry is
    replaced by the rank of its label, yielding consecutive integer labels
    from 0 to K - 1. Given that this function is herein always called after
    passing a vector to the function checkcl, it relies on the assumption
    that the input does not contain any NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array
        A massaged version of the input vector of cluster identities.
    """

    flat = np.asanyarray(array_in).reshape(-1)

    # np.unique sorts the distinct labels; 'return_inverse' yields, for each
    # entry, the rank of its label in that sorted order -- exactly the dense
    # relabelling required here.
    _, dense_labels = np.unique(flat, return_inverse = True)

    return dense_labels.astype(int)
def checks(similarities, verbose = False):
    """Check that a matrix is a proper similarity matrix and bring
    appropriate changes if applicable.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples of the
        data-set.

    verbose : Boolean, optional (default = False)
        Alerts of any issue with the similarities matrix provided
        and of any step possibly taken to remediate such problem.

    Raises
    ------
    ValueError
        If the matrix is empty or contains NaN or infinite entries.

    Notes
    -----
    NOTE(review): some repairs below rebind the local name only (taking the
    real part, truncating to a square, symmetrizing), while the clipping of
    out-of-range entries and the unit-diagonal fix write into the caller's
    array in place -- callers therefore see only part of the corrections.
    """

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    elif np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    elif np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    else:
        # Complex entries: keep only the real components (local rebind).
        if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: complex entries found "
                      "in the similarities matrix.")

            similarities = similarities.real

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: "
                      "truncated to their real components.")

        # Non-square input: keep the largest leading square sub-matrix.
        if similarities.shape[0] != similarities.shape[1]:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")

            N_square = min(similarities.shape)
            similarities = similarities[:N_square, :N_square]

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")

        # Clip entries to the valid [0, 1] similarity range (in place).
        max_sim = np.amax(similarities)
        min_sim = np.amin(similarities)
        if max_sim > 1 or min_sim < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: strictly negative "
                      "or bigger than unity entries spotted in input similarities matrix.")

            indices_too_big = np.where(similarities > 1)
            indices_negative = np.where(similarities < 0)
            similarities[indices_too_big] = 1.0
            similarities[indices_negative] = 0.0

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: done setting them to "
                      "the lower or upper accepted values.")

        # Symmetrize by averaging with the transpose (local rebind).
        if not np.allclose(similarities, np.transpose(similarities)):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
                      "similarities matrix.")

            similarities = np.divide(similarities + np.transpose(similarities), 2.0)

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: now symmetrized.")

        # Force self-similarities to exactly 1 (in place).
        if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: the self-similarities "
                      "provided as input are not all of unit value.")

            similarities[np.diag_indices(similarities.shape[0])] = 1

            if verbose:
                print("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus
    function.

    Parameters
    ----------
    hdf5_file_name : file handle or string
        HDF5 scratch file holding the hypergraph adjacency matrix under
        '/consensus_group'.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID found across runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the CSPA heuristics for consensus clustering.

    Raises
    ------
    ValueError
        If the data-set has more than 20000 samples, for which the dense
        similarity matrix built here would be too costly.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # s[i, j] counts in how many runs samples i and j share a cluster.
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))

    del hypergraph_adjacency
    gc.collect()

    # Validate the co-association matrix normalized to [0, 1].
    checks(np.divide(s, float(N_runs)), verbose)

    # Rescale so that the total edge weight is a fixed large constant,
    # as expected by the METIS graph-file writer downstream.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))

        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)

        # Evaluate the rescaling out-of-core, straight into the HDF5 array.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()

        # Round the scaled similarities to integers, chunk by chunk.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)

    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        HDF5 scratch file holding the hypergraph adjacency matrix.

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)
        Defaults to one more than the largest cluster ID found across runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")

    # 'is None' is the correct identity test for the missing-argument
    # sentinel ('== None' delegated to whatever __eq__ the argument defines).
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.

    Clusters the hyper-edges of the ensemble hypergraph (via their pairwise
    Jaccard similarities and a call to 'cmetis'), collapses each group of
    hyper-edges into one meta-hyper-edge of mean memberships, then assigns
    every sample to its most strongly associated meta-cluster.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    cluster_runs : array of shape (n_partitions, n_samples)

    verbose : bool, optional (default = False)

    N_clusters_max : int, optional (default = None)

    Returns
    -------
    A vector specifying the cluster label to which each sample has been
    assigned by the MCLA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')

    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1

    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]

    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")

    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)

    # Vertex weights for cmetis: the size of each hyper-edge.
    w = hypergraph_adjacency.sum(axis = 1)

    N_rows = hypergraph_adjacency.shape[0]

    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")

    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))

        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)

        scale_factor = 100.0

        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")

        # For binary rows, |A ∩ B| is the dot product and
        # |A ∪ B| = |A| + |B| - |A ∩ B|; compute the Jaccard index
        # chunk by chunk to stay within memory.
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())

        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))

        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)

            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))

            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))

            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))

            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor

            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix

            del Jaccard_matrix, temp, x, y
            gc.collect()

    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")

    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges

    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus * N_samples)

    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)

    # Average, per meta-cluster, the membership rows of its hyper-edges.
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M

    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.

    del hypergraph_adjacency
    gc.collect()

    # Each object will now be assigned to its most associated meta-cluster.

    # Samples with zero association everywhere get random associations so
    # the competition below remains well-defined.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]

    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)

    random_state = np.random.RandomState()

    # Add a tiny random perturbation so that ties in the argmax competition
    # below are broken (pseudo-)randomly.
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)

    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)

    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()

    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()

    # Normalize each sample's association column to sum to one.
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))

    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.

    inv_sum_diag = np.reciprocal(sum_diag.astype(float))

    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)

    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)

    # Assign each sample to the meta-cluster of its maximal association;
    # iterating in reverse lets lower-indexed chunks overwrite higher ones.
    chunks_size = get_chunk_size(N_samples, 2)
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]

    # Done with competing for objects.

    cluster_labels = one_to_max(cluster_labels)

    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)

    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()

    return cluster_labels
def create_membership_matrix(cluster_run):
    """Build the binary membership indicator matrix of a label vector.

    Each row of the returned matrix corresponds to one cluster ID present in
    'cluster_run' (in sorted order) and flags, with ones, the samples assigned
    to that cluster. Concatenating such matrices over several partitions yields
    the adjacency matrix of the hypergraph representing a clustering ensemble.

    Parameters
    ----------
    cluster_run : array of shape (n_partitions, n_samples)

    Returns
    -------
    An adjacency matrix in compressed sparse row form.
    """

    cluster_run = np.asanyarray(cluster_run)

    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")

    cluster_run = cluster_run.reshape(cluster_run.size)

    # Only finite entries define clusters; NaN marks unselected samples.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))

    # One row of sample indices per cluster ID, concatenated into CSR components.
    rows = [np.where(cluster_run == label)[0] for label in cluster_ids]
    if rows:
        indices = np.concatenate(rows)
    else:
        indices = np.empty(0, dtype = np.int32)
    indptr = np.cumsum([0] + [r.size for r in rows])
    data = np.ones(indices.size, dtype = int)

    return scipy.sparse.csr_matrix((data, indices, indptr),
                                   shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """Partition the CSPA-induced similarity graph with METIS.

    Writes the graph file, runs the METIS partitioner on it, then removes
    the temporary graph file.

    Parameters
    ----------
    hdf5_file_name : string or file handle

    N_clusters_max : int

    Returns
    -------
    cluster_labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the CSPA heuristics for consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    graph_file_name = wgraph(hdf5_file_name)
    cluster_labels = sgraph(N_clusters_max, graph_file_name)
    subprocess.call(['rm', graph_file_name])

    return cluster_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Run the HGPA hypergraph partitioning through hMETIS.

    Writes the hypergraph file (weighted if 'w' is given), runs the
    partitioner, remaps the labels to a dense 0-based range via 'one_to_max',
    then removes the temporary graph file.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)

    Returns
    -------
    cluster_labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the HGPA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """

    # Mode 2 writes an unweighted hypergraph, mode 3 a weighted one.
    graph_file_name = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file_name))
    subprocess.call(['rm', graph_file_name])

    return cluster_labels
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Run the MCLA meta-graph partitioning through METIS.

    Writes the weighted meta-graph file, runs the partitioner, remaps the
    labels to a dense 0-based range via 'one_to_max', then removes the
    temporary graph file.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    N_clusters_max : int

    w : array, optional (default = None)

    Returns
    -------
    cluster_labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned as a result of the MCLA approximation algorithm for
        consensus clustering.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """

    graph_file_name = wgraph(hdf5_file_name, w, 1)
    cluster_labels = one_to_max(sgraph(N_clusters_max, graph_file_name))
    subprocess.call(['rm', graph_file_name])

    return cluster_labels
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    w : list or array, optional (default = None)
        Vertex weights (method 1) or hyper-edge weights (method 3).

    method : int, optional (default = 0)
        0: CSPA similarity graph; 1: MCLA meta-graph (weighted vertices);
        2 or 3: HGPA hypergraph (3 uses the weights supplied in 'w').

    Returns
    -------
    file_name : string
        Name of the graph file written in the current directory, or the
        sentinel 'DO_NOT_PROCESS' when the CSPA graph has no edge at all.
    """

    print('\n#')

    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")

    if w is None:
        w = []

    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]

    if method in {0, 1}:
        # Self-similarities must not contribute edges.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0

    if method == 1:
        # METIS expects integer vertex weights; rescale before rounding.
        scale_factor = 100.0
        w *= scale_factor
        w = np.rint(w)

    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))

        if method == 0:
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                # BUGFIX: the early return used to leak the open HDF5 handle
                # and leave the similarities node behind; clean up first.
                fileh.remove_node(fileh.root.consensus_group, e_mat.name)
                fileh.close()
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count the positive entries chunk by chunk to bound memory use.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            file.write('{} {} 1\n'.format(N_cols, N_rows))

        if method in {0, 1}:
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        # Vertex weight first, then (neighbour, edge-weight) pairs.
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))

            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')

    if method in {0, 1}:
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()

    print('#')

    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of partitions requested from the (hyper-)graph partitioner.

    file_name : string
        Graph file written by 'wgraph'. The sentinel 'DO_NOT_PROCESS'
        short-circuits to an empty labelling.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been assigned
        as a result of any of three approximation algorithms for consensus clustering
        (either of CSPA, HGPA or MCLA).

    Raises
    ------
    NameError
        If 'file_name' is none of the graph files produced by 'wgraph'.
    """

    if file_name == 'DO_NOT_PROCESS':
        return []

    print('\n#')

    k = str(N_clusters_max)
    # The external partitioners write their result to '<file_name>.part.<k>'.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")

        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)

        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))

    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # FIX: 'np.loadtxt' opens the file by name itself; the previous version
    # wrapped it in a redundant, unused 'open' context manager and also
    # initialized 'labels' to a dead empty array.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)

    labels = one_to_max(labels)

    subprocess.call(['rm', out_name])

    print('#')

    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    consensus_labels : array of shape (n_samples,)

    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    cluster_dims_list : list
        Starts with 0, then holds, per run, the number of distinct valid
        (finite and non-negative) cluster IDs found in that run.

    mutual_info_list : list
        Per run, the 'ceEvalMutual' score between that run and the consensus.

    consensus_adjacency : compressed sparse row matrix
        Membership matrix of the consensus labelling (one row per
        consensus cluster, flagging its samples).
    """

    # A flat vector is interpreted as a single partition.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size

    # Build, in CSR components, the membership matrix of the consensus
    # labelling: row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)

    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                          shape = (N_consensus_labels, N_samples))

    fileh = tables.open_file(hdf5_file_name, 'r+')

    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)

    # Extendable array: one block of rows appended per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                             tables.Float32Atom(), (0, N_consensus_labels),
                             "Matrix of overlaps between each run and "
                             "the consensus labellings", filters = FILTERS,
                             expectedrows = N_consensus_labels * N_runs)

    mutual_info_list = []
    cluster_dims_list = [0]

    for i in range(N_runs):
        M = cluster_runs[i]

        mutual_info_list.append(ceEvalMutual(M, consensus_labels))

        # Keep only valid labels: finite and non-negative entries.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size

        cluster_dims_list.append(n_ids)

        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)

        indices = np.empty(0, dtype = int)
        indptr = [0]

        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)

            # NOTE(review): the union below uses 'indices' accumulated over
            # *all* clusters seen so far, not just the samples of cluster
            # 'elt'; for c > 0 this inflates the union size. Looks
            # suspicious — confirm this is intended before changing it.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size

            c += 1

        data = np.ones(indices.size, dtype = int)

        # Membership matrix of run 'i'; intersection sizes with the consensus
        # clusters follow from a sparse matrix product.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))

        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))

        # Element-wise intersection-over-union ratios, appended row-wise.
        overlap_matrix.append(np.divide(intersections, unions))

    fileh.close()

    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
GGiecold/Cluster_Ensembles | src/Cluster_Ensembles/Cluster_Ensembles.py | overlap_matrix | python | def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
cluster_runs = cluster_runs.reshape(1, -1)
N_runs, N_samples = cluster_runs.shape
N_consensus_labels = np.unique(consensus_labels).size
indices_consensus_adjacency = np.empty(0, dtype = np.int32)
indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
for k in range(N_consensus_labels):
indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
shape = (N_consensus_labels, N_samples))
fileh = tables.open_file(hdf5_file_name, 'r+')
FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
tables.Float32Atom(), (0, N_consensus_labels),
"Matrix of overlaps between each run and "
"the consensus labellings", filters = FILTERS,
expectedrows = N_consensus_labels * N_runs)
mutual_info_list = []
cluster_dims_list = [0]
for i in range(N_runs):
M = cluster_runs[i]
mutual_info_list.append(ceEvalMutual(M, consensus_labels))
finite_indices = np.where(np.isfinite(M))[0]
positive_indices = np.where(M >= 0)[0]
selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
cluster_ids = np.unique(M[selected_indices])
n_ids = cluster_ids.size
cluster_dims_list.append(n_ids)
unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
indices = np.empty(0, dtype = int)
indptr = [0]
c = 0
for elt in cluster_ids:
indices = np.append(indices, np.where(M == elt)[0])
indptr.append(indices.size)
for k in range(N_consensus_labels):
x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
unions[c, k] = np.union1d(indices, x).size
c += 1
data = np.ones(indices.size, dtype = int)
I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
intersections = I.dot(consensus_adjacency.transpose())
intersections = np.squeeze(np.asarray(intersections.todense()))
overlap_matrix.append(np.divide(intersections, unions))
fileh.close()
return cluster_dims_list, mutual_info_list, consensus_adjacency | Writes on disk (in an HDF5 file whose handle is provided as the first
argument to this function) a stack of matrices, each describing
for a particular run the overlap of cluster ID's that are matching
each of the cluster ID's stored in 'consensus_labels'
(the vector of labels obtained by ensemble clustering).
Returns also the adjacency matrix for consensus clustering
and a vector of mutual informations between each of the clusterings
from the ensemble and their consensus.
Parameters
----------
hdf5_file_name : file handle or string
consensus_labels : array of shape (n_samples,)
cluster_runs : array of shape (n_partitions, n_samples)
Returns
-------
cluster_dims_list :
mutual_info_list :
consensus_adjacency : | train | https://github.com/GGiecold/Cluster_Ensembles/blob/d1b1ce9f541fc937ac7c677e964520e0e9163dc7/src/Cluster_Ensembles/Cluster_Ensembles.py#L1224-L1322 | [
"def get_compression_filter(byte_counts):\n \"\"\"Determine whether or not to use a compression on the array stored in\n a hierarchical data format, and which compression library to use to that purpose.\n Compression reduces the HDF5 file size and also helps improving I/O efficiency\n for large datasets.\n\n Parameters\n ----------\n byte_counts : int\n\n Returns\n -------\n FILTERS : instance of the tables.Filters class\n \"\"\"\n\n assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0\n\n if 2 * byte_counts > 1000 * memory()['free']:\n try:\n FILTERS = tables.filters(complevel = 5, complib = 'blosc', \n shuffle = True, least_significant_digit = 6)\n except tables.FiltersWarning:\n FILTERS = tables.filters(complevel = 5, complib = 'lzo', \n shuffle = True, least_significant_digit = 6) \n else:\n FILTERS = None\n\n return FILTERS\n",
"def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):\n \"\"\"Compute a weighted average of the mutual information with the known labels, \n the weights being proportional to the fraction of known labels.\n\n Parameters\n ----------\n cluster_runs : array of shape (n_partitions, n_samples)\n Each row of this matrix is such that the i-th entry corresponds to the\n cluster ID to which the i-th sample of the data-set has been classified\n by this particular clustering. Samples not selected for clustering\n in a given round are are tagged by an NaN.\n\n cluster_ensemble : array of shape (n_samples,), optional (default = None)\n The identity of the cluster to which each sample of the whole data-set \n belong to according to consensus clustering.\n\n verbose : Boolean, optional (default = False)\n Specifies if status messages will be displayed\n on the standard output.\n\n Returns\n -------\n unnamed variable : float\n The weighted average of the mutual information between\n the consensus clustering and the many runs from the ensemble\n of independent clusterings on subsamples of the data-set.\n \"\"\"\n\n if cluster_ensemble is None:\n return 0.0\n\n if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):\n cluster_runs = cluster_runs.reshape(1, -1)\n\n weighted_average_mutual_information = 0\n\n N_labelled_indices = 0\n\n for i in range(cluster_runs.shape[0]):\n labelled_indices = np.where(np.isfinite(cluster_runs[i]))[0]\n N = labelled_indices.size\n\n x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)\n y = np.reshape(checkcl(np.rint(cluster_runs[i, labelled_indices]), verbose), newshape = N)\n\n q = normalized_mutual_info_score(x, y)\n\n weighted_average_mutual_information += q * N\n N_labelled_indices += N\n\n return float(weighted_average_mutual_information) / N_labelled_indices\n"
] | #!/usr/bin/env python
# Cluster_Ensembles/src/Cluster_Ensembles/Cluster_Ensembles.py;
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com, ggiecold@jimmy.harvard.edu
"""Cluster_Ensembles is a package for combining multiple partitions
into a consolidated clustering.
The combinatorial optimization problem of obtaining such a consensus clustering
is reformulated in terms of approximation algorithms for
graph or hyper-graph partitioning.
References
----------
* Giecold, G., Marco, E., Trippa, L. and Yuan, G.-C.,
"Robust Lineage Reconstruction from High-Dimensional Single-Cell Data".
ArXiv preprint [q-bio.QM, stat.AP, stat.CO, stat.ML]: http://arxiv.org/abs/1601.02748
* Strehl, A. and Ghosh, J., "Cluster Ensembles - A Knowledge Reuse Framework
for Combining Multiple Partitions".
In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
* Kernighan, B. W. and Lin, S., "An Efficient Heuristic Procedure
for Partitioning Graphs".
In: The Bell System Technical Journal, 49, 2, pp. 291-307. 1970
* Karypis, G. and Kumar, V., "A Fast and High Quality Multilevel Scheme
for Partitioning Irregular Graphs"
In: SIAM Journal on Scientific Computing, 20, 1, pp. 359-392. 1998
* Karypis, G., Aggarwal, R., Kumar, V. and Shekhar, S., "Multilevel Hypergraph Partitioning:
Applications in the VLSI Domain".
In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 7, 1, pp. 69-79. 1999
"""
import functools
import gc
import numbers
import numpy as np
import operator
import pkg_resources
import psutil
import scipy.sparse
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import normalized_mutual_info_score
import subprocess
import sys
import tables
import warnings
import six
from six.moves import range
from functools import reduce
np.seterr(invalid = 'ignore')
warnings.filterwarnings('ignore', category = DeprecationWarning)
__all__ = ['cluster_ensembles', 'CSPA', 'HGPA', 'load_hypergraph_adjacency',
'MCLA', 'overlap_matrix']
def memory():
    """Determine memory specifications of the machine.

    Returns
    -------
    mem_info : dictonary
        Maps each field reported by 'psutil.virtual_memory' (total, free,
        used, ...) to its value cast to an integer.
    """

    return {field: int(amount)
            for field, amount in psutil.virtual_memory()._asdict().items()}
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.

    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """

    # (free-memory floor, safety margin to subtract) pairs, ordered from the
    # most generous configuration down to the bare minimum.
    tiers = ((60000000, 10000000),
             (40000000, 7000000),
             (14000000, 2000000),
             (8000000, 1400000),
             (2000000, 900000),
             (1000000, 400000))

    mem_free = memory()['free']
    for floor, margin in tiers:
        if mem_free > floor:
            return int(((mem_free - margin) * 1000) / (4 * n * N))

    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def get_compression_filter(byte_counts):
    """Determine whether or not to use a compression on the array stored in
    a hierarchical data format, and which compression library to use to that purpose.
    Compression reduces the HDF5 file size and also helps improving I/O efficiency
    for large datasets.

    Parameters
    ----------
    byte_counts : int
        Number of bytes needed to store the array uncompressed.

    Returns
    -------
    FILTERS : instance of the tables.Filters class, or None
        None when the array is small relative to the free memory and
        compression is therefore skipped.
    """

    assert isinstance(byte_counts, numbers.Integral) and byte_counts > 0

    # Compress only large arrays; the factor of 1000 presumably converts the
    # 'free' figure to bytes — TODO confirm the unit reported by memory().
    if 2 * byte_counts > 1000 * memory()['free']:
        try:
            # BUGFIX: the filter factory is the 'tables.Filters' class;
            # 'tables.filters' is a module and calling it raises TypeError.
            FILTERS = tables.Filters(complevel = 5, complib = 'blosc',
                                     shuffle = True, least_significant_digit = 6)
        except tables.FiltersWarning:
            # Fall back to LZO if Blosc is unavailable.
            FILTERS = tables.Filters(complevel = 5, complib = 'lzo',
                                     shuffle = True, least_significant_digit = 6)
    else:
        FILTERS = None

    return FILTERS
def build_hypergraph_adjacency(cluster_runs):
    """Return the adjacency matrix to a hypergraph, in sparse matrix representation.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
        Represents the hypergraph associated with an ensemble of partitions,
        each partition corresponding to a row of the array 'cluster_runs'
        provided at input.
    """

    # Stack all membership matrices in a single pass. The previous version
    # called 'scipy.sparse.vstack' once per partition inside a loop, copying
    # the accumulated matrix at every iteration (quadratic in the number of
    # partitions); a single vstack over the list is linear.
    membership_matrices = [create_membership_matrix(cluster_runs[i])
                           for i in range(cluster_runs.shape[0])]

    return scipy.sparse.vstack(membership_matrices, format = 'csr')
def store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name):
    """Write an hypergraph adjacency to disk in an HDF5 data structure.

    The CSR matrix is stored component-wise ('data', 'indices', 'indptr' and
    'shape') under the '/consensus_group' node, replacing any arrays of the
    same names left over from a previous call.

    Parameters
    ----------
    hypergraph_adjacency : compressed sparse row matrix

    hdf5_file_name : file handle or string
    """

    assert(hypergraph_adjacency.__class__ == scipy.sparse.csr.csr_matrix)

    # Decide on compression based on the total footprint of the CSR buffers.
    byte_counts = hypergraph_adjacency.data.nbytes + hypergraph_adjacency.indices.nbytes + hypergraph_adjacency.indptr.nbytes
    FILTERS = get_compression_filter(byte_counts)

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        for par in ('data', 'indices', 'indptr', 'shape'):
            try:
                # Drop any stale copy of this array from a previous run.
                n = getattr(fileh.root.consensus_group, par)
                n._f_remove()
            except AttributeError:
                pass

            array = np.array(getattr(hypergraph_adjacency, par))
            atom = tables.Atom.from_dtype(array.dtype)
            ds = fileh.create_carray(fileh.root.consensus_group, par, atom,
                                     array.shape, filters = FILTERS)
            ds[:] = array
def load_hypergraph_adjacency(hdf5_file_name):
    """Reassemble the hypergraph adjacency matrix from the CSR components
    previously written to disk by 'store_hypergraph_adjacency'.

    Parameters
    ----------
    hdf5_file_name : file handle or string

    Returns
    -------
    hypergraph_adjacency : compressed sparse row matrix
    """

    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        group = fileh.root.consensus_group
        data = group.data.read()
        indices = group.indices.read()
        indptr = group.indptr.read()
        shape = group.shape.read()

    return scipy.sparse.csr_matrix((data, indices, indptr), shape = shape)
def cluster_ensembles(cluster_runs, hdf5_file_name = None, verbose = False, N_clusters_max = None):
    """Call up to three different functions for heuristic ensemble clustering
    (namely CSPA, HGPA and MCLA) then select as the definitive
    consensus clustering the one with the highest average mutual information score
    between its vector of consensus labels and the vectors of labels associated to each
    partition from the ensemble.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    hdf5_file_name : file object or string, optional (default = None)
        The handle or name of an HDF5 file where any array needed
        for consensus_clustering and too large to fit into memory
        is to be stored. Created if not specified at input.

    verbose : Boolean, optional (default = False)
        Specifies if messages concerning the status of the many functions
        subsequently called 'cluster_ensembles' will be displayed
        on the standard output.

    N_clusters_max : int, optional
        The number of clusters in which to partition the samples into
        a consensus clustering. This defaults to the highest number of clusters
        encountered in the sets of independent clusterings on subsamples
        of the data-set (i.e. the maximum of the entries in "cluster_runs").

    Returns
    -------
    cluster_ensemble : array of shape (n_samples,)
        For the final ensemble clustering, this vector contains the
        cluster IDs of each sample in the whole data-set.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """

    if hdf5_file_name is None:
        hdf5_file_name = './Cluster_Ensembles.h5'
    # Start from a fresh HDF5 store; the consensus functions keep their
    # intermediate arrays under '/consensus_group'.
    fileh = tables.open_file(hdf5_file_name, 'w')
    fileh.create_group(fileh.root, 'consensus_group')
    fileh.close()

    cluster_ensemble = []
    score = np.empty(0)

    # CSPA is skipped above 10000 samples (presumably because of its memory
    # footprint — TODO confirm); only HGPA and MCLA are attempted then.
    if cluster_runs.shape[1] > 10000:
        consensus_functions = [HGPA, MCLA]
        function_names = ['HGPA', 'MCLA']
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "due to a rather large number of cells in your data-set, "
              "using only 'HyperGraph Partitioning Algorithm' (HGPA) "
              "and 'Meta-CLustering Algorithm' (MCLA) "
              "as ensemble consensus functions.\n")
    else:
        consensus_functions = [CSPA, HGPA, MCLA]
        function_names = ['CSPA', 'HGPA', 'MCLA']

    # The hypergraph adjacency is shared by all consensus functions.
    hypergraph_adjacency = build_hypergraph_adjacency(cluster_runs)
    store_hypergraph_adjacency(hypergraph_adjacency, hdf5_file_name)

    for i in range(len(consensus_functions)):
        cluster_ensemble.append(consensus_functions[i](hdf5_file_name, cluster_runs, verbose, N_clusters_max))
        # Score each candidate consensus by its average mutual information
        # with the input partitions.
        score = np.append(score, ceEvalMutual(cluster_runs, cluster_ensemble[i], verbose))
        print("\nINFO: Cluster_Ensembles: cluster_ensembles: "
              "{0} at {1}.".format(function_names[i], score[i]))
        print('*****')

    # Keep the candidate sharing the most mutual information with the ensemble.
    return cluster_ensemble[np.argmax(score)]
def ceEvalMutual(cluster_runs, cluster_ensemble = None, verbose = False):
    """Compute a weighted average of the mutual information with the known labels,
    the weights being proportional to the fraction of known labels.

    Parameters
    ----------
    cluster_runs : array of shape (n_partitions, n_samples)
        Each row of this matrix is such that the i-th entry corresponds to the
        cluster ID to which the i-th sample of the data-set has been classified
        by this particular clustering. Samples not selected for clustering
        in a given round are tagged by an NaN.

    cluster_ensemble : array of shape (n_samples,), optional (default = None)
        The identity of the cluster to which each sample of the whole data-set
        belong to according to consensus clustering.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    unnamed variable : float
        The weighted average of the mutual information between
        the consensus clustering and the many runs from the ensemble
        of independent clusterings on subsamples of the data-set.
    """

    if cluster_ensemble is None:
        return 0.0

    # A flat vector is interpreted as a single partition.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)

    total_mutual_information = 0
    total_labelled = 0

    for run in cluster_runs:
        # Only the samples actually labelled in this run take part.
        labelled_indices = np.where(np.isfinite(run))[0]
        N = labelled_indices.size

        x = np.reshape(checkcl(cluster_ensemble[labelled_indices], verbose), newshape = N)
        y = np.reshape(checkcl(np.rint(run[labelled_indices]), verbose), newshape = N)

        total_mutual_information += normalized_mutual_info_score(x, y) * N
        total_labelled += N

    return float(total_mutual_information) / total_labelled
def checkcl(cluster_run, verbose = False):
    """Ensure that a cluster labelling is in a valid format.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        A vector of cluster IDs for each of the samples selected for a given
        round of clustering. The samples not selected are labelled with NaN.

    verbose : Boolean, optional (default = False)
        Specifies if status messages will be displayed
        on the standard output.

    Returns
    -------
    cluster_run : array of shape (n_samples,)
        The input vector is modified in place, such that invalid values are
        either rejected or altered. In particular, the labelling of cluster IDs
        starts at zero and increases by 1 without any gap left.

    Raises
    ------
    ValueError
        If the vector is empty, is not one-dimensional, or contains
        any NaN entry.
    """

    cluster_run = np.asanyarray(cluster_run)

    if cluster_run.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "empty vector provided as input.\n")
    # BUGFIX: the two error messages below used to misspell the function
    # name as 'checkl'.
    elif reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.\n")
    elif np.where(np.isnan(cluster_run))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checkcl: vector of cluster "
                         "labellings provided as input contains at least one 'NaN'.\n")
    else:
        # Shift negative labellings so that the smallest label is 0.
        min_label = np.amin(cluster_run)
        if min_label < 0:
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: detected negative values "
                      "as cluster labellings.")
            cluster_run -= min_label
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: "
                      "offset to a minimum value of '0'.")
        # Densify the labelling if it leaves gaps in the integer range.
        x = one_to_max(cluster_run)
        if np.amax(cluster_run) != np.amax(x):
            if verbose:
                print("\nINFO: Cluster_Ensembles: checkcl: the vector cluster "
                      "labellings provided is not a dense integer mapping.")
            cluster_run = x
            if verbose:
                print("INFO: Cluster_Ensembles: checkcl: brought modification "
                      "to this vector so that its labels range "
                      "from 0 to {0}, included.\n".format(np.amax(cluster_run)))

    return cluster_run
def one_to_max(array_in):
    """Remap a vector of cluster labels onto the dense range 0..K-1.

    Labels keep their relative order: the smallest input label becomes 0,
    the next distinct label becomes 1, and so on.  Given that this function
    is herein always called after passing a vector to the function checkcl,
    it relies on the assumption that the input contains no NaN entries.

    Parameters
    ----------
    array_in : a list or one-dimensional array
        The list of cluster IDs to be processed.

    Returns
    -------
    result : one-dimensional array of int
        A massaged version of the input vector of cluster identities.
    """
    x = np.asanyarray(array_in)
    N_in = x.size
    # np.unique returns the sorted distinct labels together with, for each
    # input entry, the index of its label in that sorted list -- exactly the
    # dense relabelling the previous hand-rolled sort/scan loop computed.
    _, dense = np.unique(x.reshape(N_in), return_inverse = True)
    # Reshape guards against NumPy >= 2.1, where return_inverse preserves
    # the input's shape instead of flattening it.
    return dense.reshape(N_in).astype(int)
def checks(similarities, verbose = False):
    """Sanity-check a similarity matrix and patch it where possible.

    The matrix is rejected outright if it is empty or contains NaN or
    infinite entries.  Complex entries are truncated to their real parts,
    non-square matrices are cut down to their largest square sub-matrix,
    out-of-range entries are clipped into [0, 1] (in place), the matrix is
    symmetrized and unit self-similarities are enforced.

    Parameters
    ----------
    similarities : array of shape (n_samples, n_samples)
        A matrix of pairwise similarities between (sub)-samples.
    verbose : Boolean, optional (default = False)
        If True, report any issue detected and any step taken to fix it.
    """
    def note(message):
        # Emit a status line only when verbose output was requested.
        if verbose:
            print(message)

    if similarities.size == 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: the similarities "
                         "matrix provided as input happens to be empty.\n")
    if np.where(np.isnan(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: input similarities "
                         "matrix contains at least one 'NaN'.\n")
    if np.where(np.isinf(similarities))[0].size != 0:
        raise ValueError("\nERROR: Cluster_Ensembles: checks: at least one infinite entry "
                         "detected in input similarities matrix.\n")
    if np.where(np.logical_not(np.isreal(similarities)))[0].size != 0:
        note("\nINFO: Cluster_Ensembles: checks: complex entries found "
             "in the similarities matrix.")
        similarities = similarities.real
        note("\nINFO: Cluster_Ensembles: checks: "
             "truncated to their real components.")
    if similarities.shape[0] != similarities.shape[1]:
        note("\nINFO: Cluster_Ensembles: checks: non-square matrix provided.")
        N_square = min(similarities.shape)
        similarities = similarities[:N_square, :N_square]
        note("\nINFO: Cluster_Ensembles: checks: using largest square sub-matrix.")
    if np.amax(similarities) > 1 or np.amin(similarities) < 0:
        note("\nINFO: Cluster_Ensembles: checks: strictly negative "
             "or bigger than unity entries spotted in input similarities matrix.")
        # Clip offending entries in place (setting entries to 1.0 first
        # cannot create new negative entries, so the order is safe).
        similarities[np.where(similarities > 1)] = 1.0
        similarities[np.where(similarities < 0)] = 0.0
        note("\nINFO: Cluster_Ensembles: checks: done setting them to "
             "the lower or upper accepted values.")
    if not np.allclose(similarities, np.transpose(similarities)):
        note("\nINFO: Cluster_Ensembles: checks: non-symmetric input "
             "similarities matrix.")
        similarities = np.divide(similarities + np.transpose(similarities), 2.0)
        note("\nINFO: Cluster_Ensembles: checks: now symmetrized.")
    if not np.allclose(np.diag(similarities), np.ones(similarities.shape[0])):
        note("\nINFO: Cluster_Ensembles: checks: the self-similarities "
             "provided as input are not all of unit value.")
        similarities[np.diag_indices(similarities.shape[0])] = 1
        note("\nINFO: Cluster_Ensembles: checks: issue corrected.")
def CSPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Cluster-based Similarity Partitioning Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the CSPA heuristics for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('*****')
    print("INFO: Cluster_Ensembles: CSPA: consensus clustering using CSPA.")
    # Default: one more cluster than the largest label seen across all runs.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    # CSPA builds a dense N_samples x N_samples co-association matrix,
    # which is quadratic in memory -- hence this hard cap.
    if N_samples > 20000:
        raise ValueError("\nERROR: Cluster_Ensembles: CSPA: cannot efficiently "
                         "deal with too large a number of cells.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # s[i, j] counts in how many runs samples i and j share a cluster
    # (co-association matrix = H^T . H).
    s = scipy.sparse.csr_matrix.dot(hypergraph_adjacency.transpose().tocsr(), hypergraph_adjacency)
    s = np.squeeze(np.asarray(s.todense()))
    del hypergraph_adjacency
    gc.collect()
    # Validate the normalized co-association matrix (values in [0, 1]).
    checks(np.divide(s, float(N_runs)), verbose)
    # Rescale so the total edge weight is a fixed large constant; METIS
    # expects integer edge weights, hence the rounding below.
    e_sum_before = s.sum()
    sum_after = 100000000.0
    scale_factor = sum_after / float(e_sum_before)
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        atom = tables.Float32Atom()
        FILTERS = get_compression_filter(4 * (N_samples ** 2))
        S = fileh.create_carray(fileh.root.consensus_group, 'similarities_CSPA', atom,
                                (N_samples, N_samples), "Matrix of similarities arising "
                                "in Cluster-based Similarity Partitioning",
                                filters = FILTERS)
        # Evaluate the scaling out-of-core, straight into the HDF5 carray.
        expr = tables.Expr("s * scale_factor")
        expr.set_output(S)
        expr.eval()
        # Round to integers chunk by chunk to bound peak memory.
        chunks_size = get_chunk_size(N_samples, 3)
        for i in range(0, N_samples, chunks_size):
            tmp = S[i:min(i+chunks_size, N_samples)]
            S[i:min(i+chunks_size, N_samples)] = np.rint(tmp)
    # Partition the induced similarity graph with METIS.
    return metis(hdf5_file_name, N_clusters_max)
def HGPA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """HyperGraph-Partitioning Algorithm for a consensus function.

    Parameters
    ----------
    hdf5_file_name : string or file handle
        Path to the HDF5 store holding the hyper-graph adjacency matrix.
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
        Number of consensus clusters; defaults to one more than the
        largest label observed across all runs.

    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the HGPA approximation algorithm for consensus clustering.

    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print("INFO: Cluster_Ensembles: HGPA: consensus clustering using HGPA.")
    # Identity comparison is the correct test for None (PEP 8 / E711);
    # '== None' invoked an equality protocol for no benefit.
    if N_clusters_max is None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    return hmetis(hdf5_file_name, N_clusters_max)
def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None):
    """Meta-CLustering Algorithm for a consensus function.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    cluster_runs : array of shape (n_partitions, n_samples)
    verbose : bool, optional (default = False)
    N_clusters_max : int, optional (default = None)
    Returns
    -------
    A vector specifying the cluster label to which each sample has been assigned
    by the MCLA approximation algorithm for consensus clustering.
    Reference
    ---------
    A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework
    for Combining Multiple Partitions".
    In: Journal of Machine Learning Research, 3, pp. 583-617. 2002
    """
    print('\n*****')
    print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.')
    # Default: one more cluster than the largest label across all runs.
    if N_clusters_max == None:
        N_clusters_max = int(np.nanmax(cluster_runs)) + 1
    N_runs = cluster_runs.shape[0]
    N_samples = cluster_runs.shape[1]
    print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.")
    hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
    # w: size of each hyper-edge (number of samples per original cluster).
    w = hypergraph_adjacency.sum(axis = 1)
    N_rows = hypergraph_adjacency.shape[0]
    print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. "
          "Starting computation of Jaccard similarity matrix.")
    # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix.
    with tables.open_file(hdf5_file_name, 'r+') as fileh:
        FILTERS = get_compression_filter(4 * (N_rows ** 2))
        similarities_MCLA = fileh.create_carray(fileh.root.consensus_group,
                                'similarities_MCLA', tables.Float32Atom(),
                                (N_rows, N_rows), "Matrix of pairwise Jaccard "
                                "similarity scores", filters = FILTERS)
        # Jaccard scores are scaled and rounded to integers for METIS.
        scale_factor = 100.0
        print("INFO: Cluster_Ensembles: MCLA: "
              "starting computation of Jaccard similarity matrix.")
        # squared_MCLA[i, j] = |cluster_i AND cluster_j| (intersection sizes).
        squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose())
        squared_sums = hypergraph_adjacency.sum(axis = 1)
        squared_sums = np.squeeze(np.asarray(squared_sums))
        chunks_size = get_chunk_size(N_rows, 7)
        for i in range(0, N_rows, chunks_size):
            n_dim = min(chunks_size, N_rows - i)
            temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense()
            temp = np.squeeze(np.asarray(temp))
            x = squared_sums[i:min(i+chunks_size, N_rows)]
            x = x.reshape(-1, 1)
            x = np.dot(x, np.ones((1, squared_sums.size)))
            y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1))
            # Jaccard = |A AND B| / (|A| + |B| - |A AND B|).
            temp = np.divide(temp, x + y - temp)
            temp *= scale_factor
            Jaccard_matrix = np.rint(temp)
            similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix
            del Jaccard_matrix, temp, x, y
            gc.collect()
    # Done computing the matrix of pairwise Jaccard similarity scores.
    print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of "
          "pairwise Jaccard similarity scores.")
    # Partition the meta-graph of hyper-edges into meta-clusters.
    cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w)
    cluster_labels = one_to_max(cluster_labels)
    # After 'cmetis' returns, we are done with clustering hyper-edges
    # We are now ready to start the procedure meant to collapse meta-clusters.
    N_consensus = np.amax(cluster_labels) + 1
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus * N_samples)
    # clb_cum[c, s]: mean membership of sample s across the hyper-edges
    # belonging to meta-cluster c.
    clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum',
                                  tables.Float32Atom(), (N_consensus, N_samples),
                                  'Matrix of mean memberships, forming meta-clusters',
                                  filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 7)
    for i in range(0, N_consensus, chunks_size):
        x = min(chunks_size, N_consensus - i)
        matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1)))
        M = np.zeros((x, N_samples))
        for j in range(x):
            coord = np.where(matched_clusters[0] == j)[0]
            M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0))
        clb_cum[i:min(i+chunks_size, N_consensus)] = M
    # Done with collapsing the hyper-edges into a single meta-hyper-edge,
    # for each of the (N_consensus - 1) meta-clusters.
    del hypergraph_adjacency
    gc.collect()
    # Each object will now be assigned to its most associated meta-cluster.
    chunks_size = get_chunk_size(N_consensus, 4)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Find samples with zero association to every meta-cluster.
    if N_chunks == 0:
        null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0]
    else:
        szumsz = np.zeros(0)
        for i in range(N_chunks):
            M = clb_cum[:, i*chunks_size:(i+1)*chunks_size]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        if remainder != 0:
            M = clb_cum[:, N_chunks*chunks_size:N_samples]
            szumsz = np.append(szumsz, M.sum(axis = 0))
        null_columns = np.where(szumsz == 0)[0]
    if null_columns.size != 0:
        print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations "
              "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size))
        # Assign random associations so those samples still get a label.
        clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size)
    random_state = np.random.RandomState()
    tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(),
                              (N_consensus, N_samples), "Temporary matrix to help with "
                              "collapsing to meta-hyper-edges", filters = FILTERS)
    chunks_size = get_chunk_size(N_samples, 2)
    N_chunks, remainder = divmod(N_consensus, chunks_size)
    # Fill 'tmp' with random noise, chunk by chunk when it does not fit at once.
    if N_chunks == 0:
        tmp[:] = random_state.rand(N_consensus, N_samples)
    else:
        for i in range(N_chunks):
            tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples)
        if remainder !=0:
            tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples)
    # Add tiny jitter to 'clb_cum' so that argmax ties are broken randomly.
    expr = tables.Expr("clb_cum + (tmp / 10000)")
    expr.set_output(clb_cum)
    expr.eval()
    expr = tables.Expr("abs(tmp)")
    expr.set_output(tmp)
    expr.eval()
    chunks_size = get_chunk_size(N_consensus, 2)
    N_chunks, remainder = divmod(N_samples, chunks_size)
    # Per-sample column sums, used to normalize columns of 'clb_cum'.
    if N_chunks == 0:
        sum_diag = tmp[:].sum(axis = 0)
    else:
        sum_diag = np.empty(0)
        for i in range(N_chunks):
            M = tmp[:, i*chunks_size:(i+1)*chunks_size]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
        if remainder != 0:
            M = tmp[:, N_chunks*chunks_size:N_samples]
            sum_diag = np.append(sum_diag, M.sum(axis = 0))
    fileh.remove_node(fileh.root.consensus_group, "tmp")
    # The corresponding disk space will be freed after a call to 'fileh.close()'.
    inv_sum_diag = np.reciprocal(sum_diag.astype(float))
    # Normalize each column of 'clb_cum' and record each column's maximum.
    if N_chunks == 0:
        clb_cum *= inv_sum_diag
        max_entries = np.amax(clb_cum, axis = 0)
    else:
        max_entries = np.zeros(N_samples)
        for i in range(N_chunks):
            clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size]
            max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0)
        if remainder != 0:
            clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples]
            max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0)
    cluster_labels = np.zeros(N_samples, dtype = int)
    winner_probabilities = np.zeros(N_samples)
    chunks_size = get_chunk_size(N_samples, 2)
    # Assign each sample to the meta-cluster achieving its column maximum;
    # iterating in reverse means the lowest-indexed match wins on exact ties.
    for i in reversed(range(0, N_consensus, chunks_size)):
        ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)])
        cluster_labels[ind[1]] = i + ind[0]
        winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])]
    # Done with competing for objects.
    cluster_labels = one_to_max(cluster_labels)
    print("INFO: Cluster_Ensembles: MCLA: delivering "
          "{} clusters.".format(np.unique(cluster_labels).size))
    print("INFO: Cluster_Ensembles: MCLA: average posterior "
          "probability is {}".format(np.mean(winner_probabilities)))
    if cluster_labels.size <= 7:
        print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:")
        print(winner_probabilities)
        print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:")
        print(clb_cum)
    fileh.remove_node(fileh.root.consensus_group, "clb_cum")
    fileh.close()
    return cluster_labels
def create_membership_matrix(cluster_run):
    """For a label vector represented by cluster_run, constructs the binary
    membership indicator matrix. Such matrices, when concatenated, contribute
    to the adjacency matrix for a hypergraph representation of an
    ensemble of clusterings.

    Parameters
    ----------
    cluster_run : array of shape (n_samples,)
        One vector of cluster labels; non-finite entries are ignored.

    Returns
    -------
    An adjacency matrix in compressed sparse row form, of shape
    (n_distinct_finite_labels, n_samples); row r holds ones at the
    positions of the samples belonging to the r-th (sorted) cluster ID.
    """
    cluster_run = np.asanyarray(cluster_run)
    if reduce(operator.mul, cluster_run.shape, 1) != max(cluster_run.shape):
        raise ValueError("\nERROR: Cluster_Ensembles: create_membership_matrix: "
                         "problem in dimensions of the cluster label vector "
                         "under consideration.")
    cluster_run = cluster_run.reshape(cluster_run.size)
    # Only finite labels count as clusters; IDs come out sorted from np.unique.
    cluster_ids = np.unique(np.compress(np.isfinite(cluster_run), cluster_run))
    # Accumulate per-cluster member indices in Python lists and concatenate
    # once at the end: repeated np.append was O(n^2) in total copies.
    index_chunks = []
    indptr = [0]
    running_total = 0
    for elt in cluster_ids:
        members = np.where(cluster_run == elt)[0]
        index_chunks.append(members)
        running_total += members.size
        indptr.append(running_total)
    if index_chunks:
        indices = np.concatenate(index_chunks)
    else:
        indices = np.empty(0, dtype = np.int32)
    data = np.ones(indices.size, dtype = int)
    return scipy.sparse.csr_matrix((data, indices, indptr), shape = (cluster_ids.size, cluster_run.size))
def metis(hdf5_file_name, N_clusters_max):
    """Partition the CSPA-induced similarity graph with METIS.

    Writes the graph to a temporary file, runs the METIS partitioner on it
    and removes the file afterwards.

    Parameters
    ----------
    hdf5_file_name : string or file handle
    N_clusters_max : int

    Returns
    -------
    labels : array of shape (n_samples,)
        The cluster assigned to each sample by the CSPA heuristics.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    graph_file = wgraph(hdf5_file_name)
    consensus_labels = sgraph(N_clusters_max, graph_file)
    # Clean up the intermediate graph file written by wgraph.
    subprocess.call(['rm', graph_file])
    return consensus_labels
def hmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the ensemble hyper-graph with hMETIS for HGPA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Optional hyper-edge weights; when omitted the hyper-graph is
        written unweighted.

    Returns
    -------
    labels : array of shape (n_samples,)
        Dense cluster labels produced by the HGPA approximation algorithm.

    Reference
    ---------
    G. Karypis, R. Aggarwal, V. Kumar and S. Shekhar, "Multilevel hypergraph
    partitioning: applications in VLSI domain"
    In: IEEE Transactions on Very Large Scale Integration (VLSI) Systems,
    Vol. 7, No. 1, pp. 69-79, 1999.
    """
    # wgraph method code 2 writes an unweighted hyper-graph file, 3 a weighted one.
    graph_file = wgraph(hdf5_file_name, w, 2 if w is None else 3)
    partition = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return partition
def cmetis(hdf5_file_name, N_clusters_max, w = None):
    """Partition the meta-graph of hyper-edges with METIS for MCLA.

    Parameters
    ----------
    hdf5_file_name : file handle or string
    N_clusters_max : int
    w : array, optional (default = None)
        Vertex weights passed through to the graph-file writer.

    Returns
    -------
    labels : array of shape (n_samples,)
        Dense cluster labels produced by the MCLA approximation algorithm.

    Reference
    ---------
    G. Karypis and V. Kumar, "A Fast and High Quality Multilevel Scheme for
    Partitioning Irregular Graphs"
    In: SIAM Journal on Scientific Computing, Vol. 20, No. 1, pp. 359-392, 1999.
    """
    # Method code 1 selects the MCLA similarity matrix in wgraph.
    graph_file = wgraph(hdf5_file_name, w, 1)
    partition = one_to_max(sgraph(N_clusters_max, graph_file))
    subprocess.call(['rm', graph_file])
    return partition
def wgraph(hdf5_file_name, w = None, method = 0):
    """Write a graph file in a format apposite to later use by METIS or HMETIS.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    w : list or array, optional (default = None)
    method : int, optional (default = 0)
        0: CSPA similarity graph; 1: MCLA similarity graph (vertex-weighted);
        2: unweighted hyper-graph for HGPA; 3: weighted hyper-graph for HGPA.
    Returns
    -------
    file_name : string
        Name of the file written, or 'DO_NOT_PROCESS' when the CSPA graph
        has no positive edge at all.
    """
    print('\n#')
    # Select the matrix to serialize and the output file name per method code.
    if method == 0:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_CSPA
        file_name = 'wgraph_CSPA'
    elif method == 1:
        fileh = tables.open_file(hdf5_file_name, 'r+')
        e_mat = fileh.root.consensus_group.similarities_MCLA
        file_name = 'wgraph_MCLA'
    elif method in {2, 3}:
        hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name)
        e_mat = hypergraph_adjacency.copy().transpose()
        file_name = 'wgraph_HGPA'
        fileh = tables.open_file(hdf5_file_name, 'r+')
    else:
        raise ValueError("\nERROR: Cluster_Ensembles: wgraph: "
                         "invalid code for choice of method; "
                         "choose either 0, 1, 2 or 3.")
    if w is None:
        w = []
    N_rows = e_mat.shape[0]
    N_cols = e_mat.shape[1]
    if method in {0, 1}:
        # Zero the diagonal: METIS does not take self-loops.
        diag_ind = np.diag_indices(N_rows)
        e_mat[diag_ind] = 0
        if method == 1:
            # Vertex weights must be integers for METIS.
            scale_factor = 100.0
            w_sum_before = np.sum(w)
            w *= scale_factor
            w = np.rint(w)
    # NOTE(review): the name 'file' shadows the Python 2 builtin.
    with open(file_name, 'w') as file:
        print("INFO: Cluster_Ensembles: wgraph: writing {}.".format(file_name))
        # Header line: vertex count, edge count and a format flag
        # ('1' = edge weights, '11' = vertex and edge weights).
        if method == 0:
            # Edges are counted once per unordered pair, hence the halving.
            sz = float(np.sum(e_mat[:] > 0)) / 2
            if int(sz) == 0:
                return 'DO_NOT_PROCESS'
            else:
                file.write('{} {} 1\n'.format(N_rows, int(sz)))
        elif method == 1:
            # Count positive entries chunk by chunk to bound memory.
            chunks_size = get_chunk_size(N_cols, 2)
            N_chunks, remainder = divmod(N_rows, chunks_size)
            if N_chunks == 0:
                sz = float(np.sum(e_mat[:] > 0)) / 2
            else:
                sz = 0
                for i in range(N_chunks):
                    M = e_mat[i*chunks_size:(i+1)*chunks_size]
                    sz += float(np.sum(M > 0))
                if remainder != 0:
                    M = e_mat[N_chunks*chunks_size:N_rows]
                    sz += float(np.sum(M > 0))
                sz = float(sz) / 2
            file.write('{} {} 11\n'.format(N_rows, int(sz)))
        else:
            # hMETIS header: hyper-edge count then vertex count.
            file.write('{} {} 1\n'.format(N_cols, N_rows))
        if method in {0, 1}:
            # One line per vertex: optional vertex weight, then
            # alternating (neighbour, edge weight) pairs.
            chunks_size = get_chunk_size(N_cols, 2)
            for i in range(0, N_rows, chunks_size):
                M = e_mat[i:min(i+chunks_size, N_rows)]
                for j in range(M.shape[0]):
                    edges = np.where(M[j] > 0)[0]
                    weights = M[j, edges]
                    if method == 0:
                        interlaced = np.zeros(2 * edges.size, dtype = int)
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[::2] = edges + 1
                        interlaced[1::2] = weights
                    else:
                        interlaced = np.zeros(1 + 2 * edges.size, dtype = int)
                        interlaced[0] = w[i + j]
                        # METIS and hMETIS have vertices numbering starting from 1:
                        interlaced[1::2] = edges + 1
                        interlaced[2::2] = weights
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
        else:
            print("INFO: Cluster_Ensembles: wgraph: {N_rows} vertices and {N_cols} "
                  "non-zero hyper-edges.".format(**locals()))
            # One line per hyper-edge: its weight followed by its member vertices.
            chunks_size = get_chunk_size(N_rows, 2)
            for i in range(0, N_cols, chunks_size):
                M = np.asarray(e_mat[:, i:min(i+chunks_size, N_cols)].todense())
                for j in range(M.shape[1]):
                    edges = np.where(M[:, j] > 0)[0]
                    if method == 2:
                        weight = np.array(M[:, j].sum(), dtype = int)
                    else:
                        weight = w[i + j]
                    # METIS and hMETIS require vertices numbering starting from 1:
                    interlaced = np.append(weight, edges + 1)
                    for elt in interlaced:
                        file.write('{} '.format(int(elt)))
                    file.write('\n')
    if method in {0, 1}:
        # The serialized similarity matrix is no longer needed in the store.
        fileh.remove_node(fileh.root.consensus_group, e_mat.name)
    fileh.close()
    print('#')
    return file_name
def sgraph(N_clusters_max, file_name):
    """Runs METIS or hMETIS and returns the labels found by those
    (hyper-)graph partitioning algorithms.

    Parameters
    ----------
    N_clusters_max : int
        Number of parts requested from the partitioner.
    file_name : string
        Name of the graph file written by `wgraph`; the sentinel
        'DO_NOT_PROCESS' short-circuits to an empty result.

    Returns
    -------
    labels : array of shape (n_samples,)
        A vector of labels denoting the cluster to which each sample has been
        assigned by CSPA, HGPA or MCLA (empty list for the sentinel input).

    Raises
    ------
    NameError
        If `file_name` is not one of the recognized graph-file names.
    """
    if file_name == 'DO_NOT_PROCESS':
        return []
    print('\n#')
    k = str(N_clusters_max)
    # The partitioners write their result next to the input graph file.
    out_name = file_name + '.part.' + k
    if file_name == 'wgraph_HGPA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling shmetis for hypergraph partitioning.")
        if sys.platform.startswith('linux'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-linux/shmetis')
        elif sys.platform.startswith('darwin'):
            shmetis_path = pkg_resources.resource_filename(__name__,
                'Hypergraph_Partitioning/hmetis-1.5-osx-i686/shmetis')
        else:
            print("ERROR: Cluster_Ensembles: sgraph:\n"
                  "your platform is not supported. Some code required for graph partition "
                  "is only available for Linux distributions and OS X.")
            sys.exit(1)
        # NOTE(review): shell=True with interpolated names; the inputs are
        # internal fixed file names, but a list argument with shell=False
        # would be safer.
        args = "{0} ./".format(shmetis_path) + file_name + " " + k + " 15"
        subprocess.call(args, shell = True)
    elif file_name == 'wgraph_CSPA' or file_name == 'wgraph_MCLA':
        print("INFO: Cluster_Ensembles: sgraph: "
              "calling gpmetis for graph partitioning.")
        args = "gpmetis ./" + file_name + " " + k
        subprocess.call(args, shell = True)
    else:
        raise NameError("ERROR: Cluster_Ensembles: sgraph: {} is not an acceptable "
                        "file-name.".format(file_name))
    print("INFO: Cluster_Ensembles: sgraph: (hyper)-graph partitioning completed; "
          "loading {}".format(out_name))
    # np.loadtxt opens the file itself; the previous redundant
    # 'with open(out_name)' block (whose handle was never used) and the
    # dead 'labels = np.empty(0)' initialization are gone.
    labels = np.loadtxt(out_name, dtype = int)
    labels = labels.reshape(labels.size)
    labels = one_to_max(labels)
    subprocess.call(['rm', out_name])
    print('#')
    return labels
def overlap_matrix(hdf5_file_name, consensus_labels, cluster_runs):
    """Writes on disk (in an HDF5 file whose handle is provided as the first
    argument to this function) a stack of matrices, each describing
    for a particular run the overlap of cluster ID's that are matching
    each of the cluster ID's stored in 'consensus_labels'
    (the vector of labels obtained by ensemble clustering).
    Returns also the adjacency matrix for consensus clustering
    and a vector of mutual informations between each of the clusterings
    from the ensemble and their consensus.
    Parameters
    ----------
    hdf5_file_name : file handle or string
    consensus_labels : array of shape (n_samples,)
    cluster_runs : array of shape (n_partitions, n_samples)
    Returns
    -------
    cluster_dims_list : list
        Per-run number of distinct finite, non-negative cluster IDs,
        preceded by a leading 0.
    mutual_info_list : list
        Per-run mutual information with the consensus labelling.
    consensus_adjacency : scipy.sparse.csr_matrix
        Membership matrix of the consensus clustering.
    """
    # Promote a single labelling vector to a one-run 2-D array.
    if reduce(operator.mul, cluster_runs.shape, 1) == max(cluster_runs.shape):
        cluster_runs = cluster_runs.reshape(1, -1)
    N_runs, N_samples = cluster_runs.shape
    N_consensus_labels = np.unique(consensus_labels).size
    # Build the CSR membership matrix of the consensus labelling:
    # row k lists the samples assigned to consensus cluster k.
    indices_consensus_adjacency = np.empty(0, dtype = np.int32)
    indptr_consensus_adjacency = np.zeros(1, dtype = np.int64)
    for k in range(N_consensus_labels):
        indices_consensus_adjacency = np.append(indices_consensus_adjacency, np.where(consensus_labels == k)[0])
        indptr_consensus_adjacency = np.append(indptr_consensus_adjacency, indices_consensus_adjacency.size)
    data_consensus_adjacency = np.ones(indices_consensus_adjacency.size, dtype = int)
    consensus_adjacency = scipy.sparse.csr_matrix((data_consensus_adjacency, indices_consensus_adjacency, indptr_consensus_adjacency),
                                                  shape = (N_consensus_labels, N_samples))
    fileh = tables.open_file(hdf5_file_name, 'r+')
    FILTERS = get_compression_filter(4 * N_consensus_labels * N_runs)
    # NOTE(review): this local deliberately shadows the function name; it is
    # the on-disk extendable array receiving one overlap block per run.
    overlap_matrix = fileh.create_earray(fileh.root.consensus_group, 'overlap_matrix',
                                         tables.Float32Atom(), (0, N_consensus_labels),
                                         "Matrix of overlaps between each run and "
                                         "the consensus labellings", filters = FILTERS,
                                         expectedrows = N_consensus_labels * N_runs)
    mutual_info_list = []
    cluster_dims_list = [0]
    for i in range(N_runs):
        M = cluster_runs[i]
        mutual_info_list.append(ceEvalMutual(M, consensus_labels))
        # Keep only finite, non-negative labels for this run.
        finite_indices = np.where(np.isfinite(M))[0]
        positive_indices = np.where(M >= 0)[0]
        selected_indices = np.intersect1d(finite_indices, positive_indices, assume_unique = True)
        cluster_ids = np.unique(M[selected_indices])
        n_ids = cluster_ids.size
        cluster_dims_list.append(n_ids)
        unions = np.zeros((n_ids, N_consensus_labels), dtype = float)
        indices = np.empty(0, dtype = int)
        indptr = [0]
        c = 0
        for elt in cluster_ids:
            indices = np.append(indices, np.where(M == elt)[0])
            indptr.append(indices.size)
            # NOTE(review): 'indices' here is the CUMULATIVE list of members
            # over all cluster IDs processed so far, not just cluster 'elt';
            # the union sizes therefore grow with c -- verify against the
            # intended overlap definition upstream before changing.
            for k in range(N_consensus_labels):
                x = indices_consensus_adjacency[indptr_consensus_adjacency[k]:indptr_consensus_adjacency[k+1]]
                unions[c, k] = np.union1d(indices, x).size
            c += 1
        data = np.ones(indices.size, dtype = int)
        # Membership matrix of this run; intersections with the consensus
        # clusters come out of a single sparse matrix product.
        I = scipy.sparse.csr_matrix((data, indices, indptr), shape = (n_ids, N_samples))
        intersections = I.dot(consensus_adjacency.transpose())
        intersections = np.squeeze(np.asarray(intersections.todense()))
        # Jaccard-style overlap: |intersection| / |union|, appended per run.
        overlap_matrix.append(np.divide(intersections, unions))
    fileh.close()
    return cluster_dims_list, mutual_info_list, consensus_adjacency
|
lalinsky/python-phoenixdb | phoenixdb/__init__.py | connect | python | def connect(url, max_retries=None, **kwargs):
client = AvaticaClient(url, max_retries=max_retries)
client.connect()
return Connection(client, **kwargs) | Connects to a Phoenix query server.
:param url:
URL to the Phoenix query server, e.g. ``http://localhost:8765/``
:param autocommit:
Switch the connection to autocommit mode.
:param readonly:
Switch the connection to readonly mode.
:param max_retries:
The maximum number of retries in case there is a connection error.
:param cursor_factory:
If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.
:returns:
:class:`~phoenixdb.connection.Connection` object. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/__init__.py#L44-L67 | [
"def connect(self):\n \"\"\"Opens a HTTP connection to the RPC server.\"\"\"\n logger.debug(\"Opening connection to %s:%s\", self.url.hostname, self.url.port)\n try:\n self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)\n self.connection.connect()\n except (httplib.HTTPException, socket.error) as e:\n raise errors.InterfaceError('Unable to connect to the specified service', e)\n"
] | # Copyright 2015 Lukas Lalinsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from phoenixdb import errors, types
from phoenixdb.avatica import AvaticaClient
from phoenixdb.connection import Connection
from phoenixdb.errors import * # noqa: F401,F403
from phoenixdb.types import * # noqa: F401,F403
__all__ = ['connect', 'apilevel', 'threadsafety', 'paramstyle'] + types.__all__ + errors.__all__
apilevel = "2.0"
"""
This module supports the `DB API 2.0 interface <https://www.python.org/dev/peps/pep-0249/>`_.
"""
threadsafety = 1
"""
Multiple threads can share the module, but neither connections nor cursors.
"""
paramstyle = 'qmark'
"""
Parmetrized queries should use the question mark as a parameter placeholder.
For example::
cursor.execute("SELECT * FROM table WHERE id = ?", [my_id])
"""
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.connect | python | def connect(self):
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e) | Opens a HTTP connection to the RPC server. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L151-L158 | null | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.close | python | def close(self):
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None | Closes the HTTP connection to the RPC server. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L160-L168 | null | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.connection_sync | python | def connection_sync(self, connection_id, connProps=None):
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props | Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L286-L314 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.open_connection | python | def open_connection(self, connection_id, info=None):
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data) | Opens a new connection.
:param connection_id:
ID of the connection to open. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L316-L331 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.close_connection | python | def close_connection(self, connection_id):
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request) | Closes a connection.
:param connection_id:
ID of the connection to close. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L333-L341 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.create_statement | python | def create_statement(self, connection_id):
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id | Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L343-L358 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
    def _post_request(self, body, headers):
        """POSTs *body* to the RPC endpoint and returns the raw HTTP response.

        Retries up to ``self.max_retries`` times on HTTP protocol errors
        (after rebuilding the connection) and on HTTP 503 responses, sleeping
        ``exp(-retry_count)`` seconds between attempts.

        :param body:
            Serialized protobuf request bytes.
        :param headers:
            Dict of HTTP headers to send.
        :returns:
            The ``httplib`` response object (body not yet read).
        :raises errors.InterfaceError:
            If the request still fails once the retry budget is exhausted.
        """
        retry_count = self.max_retries
        while True:
            logger.debug("POST %s %r %r", self.url.path, body, headers)
            try:
                self.connection.request('POST', self.url.path, body=body, headers=headers)
                response = self.connection.getresponse()
            except httplib.HTTPException as e:
                if retry_count > 0:
                    # exp(-retry_count) grows as the remaining budget shrinks,
                    # so the delay between attempts increases (~0.05s, 0.14s, 0.37s).
                    delay = math.exp(-retry_count)
                    logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
                    # The connection state is undefined after a protocol error;
                    # tear it down and reconnect before retrying.
                    self.close()
                    self.connect()
                    time.sleep(delay)
                    retry_count -= 1
                    continue
                raise errors.InterfaceError('RPC request failed', cause=e)
            else:
                if response.status == httplib.SERVICE_UNAVAILABLE:
                    if retry_count > 0:
                        delay = math.exp(-retry_count)
                        logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
                        time.sleep(delay)
                        retry_count -= 1
                        continue
                # Any other status (including errors) is handed back to the
                # caller, which decides how to interpret it.
                return response
    def _apply(self, request_data, expected_response_type=None):
        """Serializes *request_data*, sends it over HTTP and unwraps the reply.

        :param request_data:
            A protobuf request message (one of the ``requests_pb2`` types).
        :param expected_response_type:
            Short name of the expected response class; defaults to the request
            class name with ``Request`` replaced by ``Response``.
        :returns:
            The serialized bytes of the wrapped response message.
        :raises errors.InterfaceError:
            On a non-200 status or an unexpected response type.
        """
        logger.debug("Sending request\n%s", pprint.pformat(request_data))
        request_name = request_data.__class__.__name__
        message = common_pb2.WireMessage()
        # Avatica wraps every payload in a WireMessage envelope carrying the
        # fully-qualified Java class name of the inner message.
        message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
        message.wrapped_message = request_data.SerializeToString()
        body = message.SerializeToString()
        headers = {'content-type': 'application/x-google-protobuf'}
        response = self._post_request(body, headers)
        response_body = response.read()
        if response.status != httplib.OK:
            logger.debug("Received response\n%s", response_body)
            if b'<html>' in response_body:
                # An HTML body means the error came from the HTTP layer
                # (e.g. the servlet container), not from Avatica itself.
                parse_error_page(response_body)
            else:
                # assume the response is in protobuf format
                parse_error_protobuf(response_body)
            raise errors.InterfaceError('RPC request returned invalid status code', response.status)
        message = common_pb2.WireMessage()
        message.ParseFromString(response_body)
        logger.debug("Received response\n%s", message)
        if expected_response_type is None:
            expected_response_type = request_name.replace('Request', 'Response')
        expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
        if message.name != expected_response_type:
            raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
        return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
    def connection_sync(self, connection_id, connProps=None):
        """Synchronizes connection properties with the server.

        :param connection_id:
            ID of the current connection.
        :param connProps:
            Dictionary with the properties that should be changed; keys are
            ``autoCommit``, ``readOnly``, ``transactionIsolation``,
            ``catalog`` and ``schema``.  Missing keys fall back to defaults.
        :returns:
            A ``common_pb2.ConnectionProperties`` object.
        """
        if connProps is None:
            connProps = {}
        request = requests_pb2.ConnectionSyncRequest()
        request.connection_id = connection_id
        # The has_* flags mark the corresponding value as deliberately set
        # rather than merely left at its protobuf default.
        request.conn_props.auto_commit = connProps.get('autoCommit', False)
        request.conn_props.has_auto_commit = True
        request.conn_props.read_only = connProps.get('readOnly', False)
        request.conn_props.has_read_only = True
        request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
        request.conn_props.catalog = connProps.get('catalog', '')
        request.conn_props.schema = connProps.get('schema', '')
        response_data = self._apply(request)
        response = responses_pb2.ConnectionSyncResponse()
        response.ParseFromString(response_data)
        return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
    def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
        """Returns a frame of rows.

        The frame describes whether there may be another frame. If there is not
        another frame, the current iteration is done when we have finished the
        rows in the this frame.

        :param connection_id:
            ID of the current connection.
        :param statement_id:
            ID of the statement to fetch rows from.
        :param signature:
            common_pb2.Signature object
        :param parameter_values:
            A list of parameter values, if statement is to be executed; otherwise ``None``.
        :param first_frame_max_size:
            The maximum number of rows that will be returned in the first Frame returned for this query.
        :returns:
            Frame data, or ``None`` if there are no more.
        """
        request = requests_pb2.ExecuteRequest()
        request.statementHandle.id = statement_id
        request.statementHandle.connection_id = connection_id
        request.statementHandle.signature.CopyFrom(signature)
        if parameter_values is not None:
            request.parameter_values.extend(parameter_values)
            request.has_parameter_values = True
        if first_frame_max_size is not None:
            # Populate both the deprecated and the current field so that both
            # old and new Avatica servers honour the limit.
            request.deprecated_first_frame_max_size = first_frame_max_size
            request.first_frame_max_size = first_frame_max_size
        response_data = self._apply(request)
        response = responses_pb2.ExecuteResponse()
        response.ParseFromString(response_data)
        return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.close_statement | python | def close_statement(self, connection_id, statement_id):
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request) | Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L360-L373 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.prepare_and_execute | python | def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results | Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L375-L408 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
    """Synchronizes connection properties with the server.

    :param connection_id:
        ID of the current connection.
    :param connProps:
        Dictionary with the properties that should be changed.
    :returns:
        A ``common_pb2.ConnectionProperties`` object.
    """
    if connProps is None:
        connProps = {}
    req = requests_pb2.ConnectionSyncRequest()
    req.connection_id = connection_id
    props = req.conn_props
    # Booleans carry an explicit has_* flag so the server can tell an
    # intentional False apart from an unset field.
    props.auto_commit = connProps.get('autoCommit', False)
    props.has_auto_commit = True
    props.read_only = connProps.get('readOnly', False)
    props.has_read_only = True
    props.transaction_isolation = connProps.get('transactionIsolation', 0)
    props.catalog = connProps.get('catalog', '')
    props.schema = connProps.get('schema', '')
    payload = self._apply(req)
    resp = responses_pb2.ConnectionSyncResponse()
    resp.ParseFromString(payload)
    return resp.conn_props
def open_connection(self, connection_id, info=None):
    """Opens a new server-side connection.

    :param connection_id:
        ID of the connection to open.
    :param info:
        Optional dictionary of connection properties to pass along.
    """
    req = requests_pb2.OpenConnectionRequest()
    req.connection_id = connection_id
    if info is not None:
        # Info is a protobuf map of repeated pairs; assigning a dict
        # directly fails, so copy the entries one by one.
        for key in info:
            req.info[key] = info[key]
    payload = self._apply(req)
    resp = responses_pb2.OpenConnectionResponse()
    resp.ParseFromString(payload)
def close_connection(self, connection_id):
    """Closes a server-side connection.

    :param connection_id:
        ID of the connection to close.
    """
    req = requests_pb2.CloseConnectionRequest()
    req.connection_id = connection_id
    self._apply(req)
def create_statement(self, connection_id):
    """Creates a new statement on the server.

    :param connection_id:
        ID of the current connection.
    :returns:
        New statement ID.
    """
    req = requests_pb2.CreateStatementRequest()
    req.connection_id = connection_id
    resp = responses_pb2.CreateStatementResponse()
    resp.ParseFromString(self._apply(req))
    return resp.statement_id
def close_statement(self, connection_id, statement_id):
    """Closes a statement on the server.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to close.
    """
    req = requests_pb2.CloseStatementRequest()
    req.connection_id = connection_id
    req.statement_id = statement_id
    self._apply(req)
def prepare(self, connection_id, sql, max_rows_total=None):
    """Prepares a statement without executing it.

    :param connection_id:
        ID of the current connection.
    :param sql:
        SQL query text.
    :param max_rows_total:
        The maximum number of rows allowed for this query, or ``None``
        for the server default.
    :returns:
        Signature of the prepared statement.
    """
    req = requests_pb2.PrepareRequest()
    req.connection_id = connection_id
    req.sql = sql
    if max_rows_total is not None:
        req.max_rows_total = max_rows_total
    resp = responses_pb2.PrepareResponse()
    resp.ParseFromString(self._apply(req))
    return resp.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
    """Executes a previously prepared statement.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the prepared statement.
    :param signature:
        ``common_pb2.Signature`` object describing the statement.
    :param parameter_values:
        A list of parameter values to bind, or ``None`` when the
        statement takes no parameters.
    :param first_frame_max_size:
        The maximum number of rows returned in the first frame, or
        ``None`` for the server default.
    :returns:
        The result sets produced by the statement.
    """
    req = requests_pb2.ExecuteRequest()
    handle = req.statementHandle
    handle.id = statement_id
    handle.connection_id = connection_id
    handle.signature.CopyFrom(signature)
    if parameter_values is not None:
        req.parameter_values.extend(parameter_values)
        req.has_parameter_values = True
    if first_frame_max_size is not None:
        # Populate both the current and the deprecated field so that
        # older servers honor the limit too.
        req.deprecated_first_frame_max_size = first_frame_max_size
        req.first_frame_max_size = first_frame_max_size
    resp = responses_pb2.ExecuteResponse()
    resp.ParseFromString(self._apply(req))
    return resp.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
    """Fetches a frame of rows from an executed statement.

    The returned frame indicates whether another frame may follow;
    iteration is complete once the final frame has been consumed.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to fetch rows from.
    :param offset:
        Zero-based offset of the first row in the requested frame.
    :param frame_max_size:
        Maximum number of rows to return; negative means no limit.
    :returns:
        Frame data, or ``None`` if there are no more.
    """
    req = requests_pb2.FetchRequest()
    req.connection_id = connection_id
    req.statement_id = statement_id
    req.offset = offset
    if frame_max_size is not None:
        req.frame_max_size = frame_max_size
    resp = responses_pb2.FetchResponse()
    resp.ParseFromString(self._apply(req))
    return resp.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.prepare | python | def prepare(self, connection_id, sql, max_rows_total=None):
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement | Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L410-L434 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
    """Constructs a new client object.

    :param url:
        URL of an Avatica RPC server.
    :param max_retries:
        Number of retries for failed requests; defaults to 3.
    """
    self.url = parse_url(url)
    if max_retries is None:
        max_retries = 3
    self.max_retries = max_retries
    # The HTTP connection is created lazily by connect().
    self.connection = None
def connect(self):
    """Opens a HTTP connection to the RPC server.

    :raises errors.InterfaceError:
        When the TCP/HTTP connection cannot be established.
    """
    host, port = self.url.hostname, self.url.port
    logger.debug("Opening connection to %s:%s", host, port)
    try:
        self.connection = httplib.HTTPConnection(host, port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as e:
        raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
    """Closes the HTTP connection to the RPC server.

    Errors raised while closing are logged and otherwise ignored; the
    connection attribute is always reset to ``None``.
    """
    if self.connection is None:
        return
    logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection.close()
    except httplib.HTTPException:
        logger.warning("Error while closing connection", exc_info=True)
    self.connection = None
def _post_request(self, body, headers):
    """POSTs *body* to the server, retrying on transport errors and 503s.

    Retries up to ``self.max_retries`` times. The backoff delay is
    ``math.exp(-retry_count)``, which grows as retries are used up
    (roughly 0.05s on the first retry up to ~0.37s on the last).

    :param body:
        Serialized request bytes to send.
    :param headers:
        Dictionary of HTTP headers for the request.
    :returns:
        The ``httplib`` response object (not yet read).
    :raises errors.InterfaceError:
        When the request still fails after all retries are exhausted.
    """
    retry_count = self.max_retries
    while True:
        logger.debug("POST %s %r %r", self.url.path, body, headers)
        try:
            self.connection.request('POST', self.url.path, body=body, headers=headers)
            response = self.connection.getresponse()
        except httplib.HTTPException as e:
            if retry_count > 0:
                delay = math.exp(-retry_count)
                logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
                # The connection may be in an undefined state after a
                # protocol error, so rebuild it before retrying.
                self.close()
                self.connect()
                time.sleep(delay)
                retry_count -= 1
                continue
            raise errors.InterfaceError('RPC request failed', cause=e)
        else:
            # 503 is treated as transient: wait and retry on the same
            # connection; any other status is returned to the caller.
            if response.status == httplib.SERVICE_UNAVAILABLE:
                if retry_count > 0:
                    delay = math.exp(-retry_count)
                    logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
                    time.sleep(delay)
                    retry_count -= 1
                    continue
            return response
def _apply(self, request_data, expected_response_type=None):
    """Wraps *request_data* in an Avatica WireMessage, POSTs it, and
    returns the serialized bytes of the wrapped response message.

    :param request_data:
        A protobuf request object from ``requests_pb2``.
    :param expected_response_type:
        Simple name of the expected response class; defaults to the
        request class name with ``Request`` replaced by ``Response``.
    :returns:
        The serialized wrapped response message (bytes), to be parsed
        by the caller with the matching response class.
    :raises errors.InterfaceError:
        On a non-200 HTTP status or an unexpected response type.
    """
    logger.debug("Sending request\n%s", pprint.pformat(request_data))
    request_name = request_data.__class__.__name__
    message = common_pb2.WireMessage()
    # Avatica identifies payloads by the fully-qualified Java class name.
    message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
    message.wrapped_message = request_data.SerializeToString()
    body = message.SerializeToString()
    headers = {'content-type': 'application/x-google-protobuf'}
    response = self._post_request(body, headers)
    response_body = response.read()
    if response.status != httplib.OK:
        logger.debug("Received response\n%s", response_body)
        # Error details may arrive as an HTML error page or as protobuf;
        # both parse helpers raise the appropriate exception themselves.
        if b'<html>' in response_body:
            parse_error_page(response_body)
        else:
            # assume the response is in protobuf format
            parse_error_protobuf(response_body)
        raise errors.InterfaceError('RPC request returned invalid status code', response.status)
    message = common_pb2.WireMessage()
    message.ParseFromString(response_body)
    logger.debug("Received response\n%s", message)
    if expected_response_type is None:
        expected_response_type = request_name.replace('Request', 'Response')
    expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
    if message.name != expected_response_type:
        raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
    return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
    """Prepares and immediately executes a statement in one round trip.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to prepare.
    :param sql:
        SQL query text.
    :param max_rows_total:
        The maximum number of rows allowed for this query, or ``None``
        for the server default.
    :param first_frame_max_size:
        The maximum number of rows returned in the first frame, or
        ``None`` for the server default.
    :returns:
        Result set with the signature of the prepared statement and the
        first frame data.
    """
    req = requests_pb2.PrepareAndExecuteRequest()
    req.connection_id = connection_id
    req.statement_id = statement_id
    req.sql = sql
    if max_rows_total is not None:
        req.max_rows_total = max_rows_total
    if first_frame_max_size is not None:
        req.first_frame_max_size = first_frame_max_size
    # The server replies with an ExecuteResponse, not the name derived
    # from the request class, so override the expected type.
    payload = self._apply(req, 'ExecuteResponse')
    resp = responses_pb2.ExecuteResponse()
    resp.ParseFromString(payload)
    return resp.results
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.execute | python | def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results | Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L436-L475 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
"""Opens a HTTP connection to the RPC server."""
logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
self.connection.connect()
except (httplib.HTTPException, socket.error) as e:
raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
"""Closes the HTTP connection to the RPC server."""
if self.connection is not None:
logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
try:
self.connection.close()
except httplib.HTTPException:
logger.warning("Error while closing connection", exc_info=True)
self.connection = None
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.path, body, headers)
try:
self.connection.request('POST', self.url.path, body=body, headers=headers)
response = self.connection.getresponse()
except httplib.HTTPException as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
self.close()
self.connect()
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if typeList is not None:
request.type_list = typeList
if typeList is not None:
request.type_list.extend(typeList)
request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
    """Fetches one frame of rows for a previously executed statement.

    The returned frame says whether another frame may follow; once a
    frame marked as done is consumed, the iteration is finished.

    :param connection_id:
        ID of the current connection.
    :param statement_id:
        ID of the statement to fetch rows from.
    :param offset:
        Zero-based offset of the first row in the requested frame.
    :param frame_max_size:
        Maximum number of rows to return; negative means no limit.
    :returns:
        Frame data, or ``None`` if there are no more.
    """
    req = requests_pb2.FetchRequest()
    req.connection_id = connection_id
    req.statement_id = statement_id
    req.offset = offset
    if frame_max_size is not None:
        req.frame_max_size = frame_max_size
    resp = responses_pb2.FetchResponse()
    resp.ParseFromString(self._apply(req))
    return resp.frame
|
lalinsky/python-phoenixdb | phoenixdb/avatica/client.py | AvaticaClient.fetch | python | def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame | Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/client.py#L477-L509 | [
"def _apply(self, request_data, expected_response_type=None):\n logger.debug(\"Sending request\\n%s\", pprint.pformat(request_data))\n\n request_name = request_data.__class__.__name__\n message = common_pb2.WireMessage()\n message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)\n message.wrapped_message = request_data.SerializeToString()\n body = message.SerializeToString()\n headers = {'content-type': 'application/x-google-protobuf'}\n\n response = self._post_request(body, headers)\n response_body = response.read()\n\n if response.status != httplib.OK:\n logger.debug(\"Received response\\n%s\", response_body)\n if b'<html>' in response_body:\n parse_error_page(response_body)\n else:\n # assume the response is in protobuf format\n parse_error_protobuf(response_body)\n raise errors.InterfaceError('RPC request returned invalid status code', response.status)\n\n message = common_pb2.WireMessage()\n message.ParseFromString(response_body)\n\n logger.debug(\"Received response\\n%s\", message)\n\n if expected_response_type is None:\n expected_response_type = request_name.replace('Request', 'Response')\n\n expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type\n if message.name != expected_response_type:\n raise errors.InterfaceError('unexpected response type \"{}\"'.format(message.name))\n\n return message.wrapped_message\n"
] | class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
do not want to use this class directly, but rather get connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.connection = None
def connect(self):
    """Opens a HTTP connection to the RPC server.

    Note: ``self.connection`` is assigned before ``connect()`` is called,
    so on failure a not-yet-connected ``HTTPConnection`` object remains in
    ``self.connection``; the retry path in ``_post_request`` handles this
    by calling ``close()`` followed by ``connect()``.

    :raises errors.InterfaceError:
        If the TCP connection cannot be established.
    """
    logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as e:
        # Wrap both protocol-level and socket-level failures in the
        # DB-API InterfaceError so callers only catch one type.
        raise errors.InterfaceError('Unable to connect to the specified service', e)
def close(self):
    """Closes the HTTP connection to the RPC server.

    Safe to call when no connection is open; errors raised while
    closing are logged and suppressed.
    """
    if self.connection is None:
        return
    logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection.close()
    except httplib.HTTPException:
        logger.warning("Error while closing connection", exc_info=True)
    self.connection = None
def _post_request(self, body, headers):
    """POSTs *body* to the RPC endpoint, retrying transient failures.

    Retries up to ``self.max_retries`` times on HTTP protocol errors
    (reconnecting first) and on HTTP 503 (service unavailable) responses.

    :param body:
        Serialized request bytes.
    :param headers:
        Dict of HTTP headers to send.
    :returns:
        The ``httplib`` response object (body not yet read).
    :raises errors.InterfaceError:
        If retries are exhausted after a protocol error.
    """
    retry_count = self.max_retries
    while True:
        logger.debug("POST %s %r %r", self.url.path, body, headers)
        try:
            self.connection.request('POST', self.url.path, body=body, headers=headers)
            response = self.connection.getresponse()
        except httplib.HTTPException as e:
            if retry_count > 0:
                # NOTE(review): exp(-retry_count) grows as retries are used up
                # (~0.05s, ~0.14s, ~0.37s for max_retries=3) -- a mild backoff.
                delay = math.exp(-retry_count)
                logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
                # Drop and re-establish the connection before retrying.
                self.close()
                self.connect()
                time.sleep(delay)
                retry_count -= 1
                continue
            raise errors.InterfaceError('RPC request failed', cause=e)
        else:
            if response.status == httplib.SERVICE_UNAVAILABLE:
                if retry_count > 0:
                    delay = math.exp(-retry_count)
                    logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
                    time.sleep(delay)
                    retry_count -= 1
                    continue
            # Non-503 responses (including other error statuses) are
            # returned to the caller, which inspects response.status.
            return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.read()
if response.status != httplib.OK:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
    """Fetches table metadata matching the given patterns.

    :param connection_id:
        ID of the current connection.
    :param catalog:
        Catalog name, or ``None`` for no catalog filter.
    :param schemaPattern:
        LIKE pattern for schema names, or ``None``.
    :param tableNamePattern:
        LIKE pattern for table names, or ``None``.
    :param typeList:
        List of table type strings to include, or ``None`` for all.
    :returns:
        Serialized response bytes from the server.
    """
    request = requests_pb2.TablesRequest()
    request.connection_id = connection_id
    if catalog is not None:
        request.catalog = catalog
    if schemaPattern is not None:
        request.schema_pattern = schemaPattern
    if tableNamePattern is not None:
        request.table_name_pattern = tableNamePattern
    if typeList is not None:
        # Repeated protobuf fields cannot be assigned directly
        # (`request.type_list = typeList` raises AttributeError);
        # the previous code also duplicated the None check and would
        # have extended the list twice. Extend exactly once.
        request.type_list.extend(typeList)
    request.has_type_list = typeList is not None
    return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
rows in the this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
|
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor.close | python | def close(self):
if self._closed:
raise ProgrammingError('the cursor is already closed')
if self._id is not None:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = None
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = True | Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L86-L102 | null | class Cursor(object):
"""Database cursor for executing queries and iterating over results.
You should not construct this object manually, use :meth:`Connection.cursor() <phoenixdb.connection.Connection.cursor>` instead.
"""
arraysize = 1
"""
Read/write attribute specifying the number of rows to fetch
at a time with :meth:`fetchmany`. It defaults to 1 meaning to
fetch a single row at a time.
"""
itersize = 2000
"""
Read/write attribute specifying the number of rows to fetch
from the backend at each network roundtrip during iteration
on the cursor. The default is 2000.
"""
def __init__(self, connection, id=None):
self._connection = connection
self._id = id
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = False
self.arraysize = self.__class__.arraysize
self.itersize = self.__class__.itersize
self._updatecount = -1
def __del__(self):
if not self._connection._closed and not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def __iter__(self):
return self
def __next__(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
next = __next__
@property
def closed(self):
"""Read-only attribute specifying if the cursor is closed or not."""
return self._closed
@property
def description(self):
if self._signature is None:
return None
description = []
for column in self._signature.columns:
description.append(ColumnDescription(
column.column_name,
column.type.name,
column.display_size,
None,
column.precision,
column.scale,
None if column.nullable == 2 else bool(column.nullable),
))
return description
def _set_id(self, id):
if self._id is not None and self._id != id:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = id
def _set_signature(self, signature):
self._signature = signature
self._column_data_types = []
self._parameter_data_types = []
if signature is None:
return
for column in signature.columns:
dtype = TypeHelper.from_class(column.column_class_name)
self._column_data_types.append(dtype)
for parameter in signature.parameters:
dtype = TypeHelper.from_class(parameter.class_name)
self._parameter_data_types.append(dtype)
def _set_frame(self, frame):
self._frame = frame
self._pos = None
if frame is not None:
if frame.rows:
self._pos = 0
elif not frame.done:
raise InternalError('got an empty frame, but the statement is not done yet')
def _fetch_next_frame(self):
    """Requests the frame that follows the current one from the server."""
    next_offset = self._frame.offset + len(self._frame.rows)
    next_frame = self._connection._client.fetch(
        self._connection._id, self._id,
        offset=next_offset, frame_max_size=self.itersize)
    self._set_frame(next_frame)
def _process_results(self, results):
if results:
result = results[0]
if result.own_statement:
self._set_id(result.statement_id)
self._set_signature(result.signature if result.HasField('signature') else None)
self._set_frame(result.first_frame if result.HasField('first_frame') else None)
self._updatecount = result.update_count
def _transform_parameters(self, parameters):
typed_parameters = []
for value, data_type in zip(parameters, self._parameter_data_types):
field_name, rep, mutate_to, cast_from = data_type
typed_value = common_pb2.TypedValue()
if value is None:
typed_value.null = True
typed_value.type = common_pb2.NULL
else:
typed_value.null = False
# use the mutator function
if mutate_to is not None:
value = mutate_to(value)
typed_value.type = rep
setattr(typed_value, field_name, value)
typed_parameters.append(typed_value)
return typed_parameters
def execute(self, operation, parameters=None):
if self._closed:
raise ProgrammingError('the cursor is already closed')
self._updatecount = -1
self._set_frame(None)
if parameters is None:
if self._id is None:
self._set_id(self._connection._client.create_statement(self._connection._id))
results = self._connection._client.prepare_and_execute(
self._connection._id, self._id,
operation, first_frame_max_size=self.itersize)
self._process_results(results)
else:
statement = self._connection._client.prepare(
self._connection._id, operation)
self._set_id(statement.id)
self._set_signature(statement.signature)
results = self._connection._client.execute(
self._connection._id, self._id,
statement.signature, self._transform_parameters(parameters),
first_frame_max_size=self.itersize)
self._process_results(results)
def executemany(self, operation, seq_of_parameters):
if self._closed:
raise ProgrammingError('the cursor is already closed')
self._updatecount = -1
self._set_frame(None)
statement = self._connection._client.prepare(
self._connection._id, operation, max_rows_total=0)
self._set_id(statement.id)
self._set_signature(statement.signature)
for parameters in seq_of_parameters:
self._connection._client.execute(
self._connection._id, self._id,
statement.signature, self._transform_parameters(parameters),
first_frame_max_size=0)
def _transform_row(self, row):
"""Transforms a Row into Python values.
:param row:
A ``common_pb2.Row`` object.
:returns:
A list of values casted into the correct Python types.
:raises:
NotImplementedError
"""
tmp_row = []
for i, column in enumerate(row.value):
if column.has_array_value:
raise NotImplementedError('array types are not supported')
elif column.scalar_value.null:
tmp_row.append(None)
else:
field_name, rep, mutate_to, cast_from = self._column_data_types[i]
# get the value from the field_name
value = getattr(column.scalar_value, field_name)
# cast the value
if cast_from is not None:
value = cast_from(value)
tmp_row.append(value)
return tmp_row
def fetchone(self):
    """Returns the next row of the result set, or ``None`` when exhausted.

    Transparently fetches the next frame from the server when the
    current frame is consumed and more data remains.
    """
    if self._frame is None:
        raise ProgrammingError('no select statement was executed')
    if self._pos is None:
        return None
    frame_rows = self._frame.rows
    index = self._pos
    result = self._transform_row(frame_rows[index])
    index += 1
    if index < len(frame_rows):
        self._pos = index
    else:
        self._pos = None
        if not self._frame.done:
            self._fetch_next_frame()
    return result
def fetchmany(self, size=None):
    """Returns up to *size* rows (default: ``self.arraysize``).

    Stops early when the result set is exhausted.
    """
    remaining = self.arraysize if size is None else size
    batch = []
    while remaining > 0:
        row = self.fetchone()
        if row is None:
            break
        batch.append(row)
        remaining -= 1
    return batch
def fetchall(self):
    """Returns all remaining rows of the result set as a list."""
    collected = []
    row = self.fetchone()
    while row is not None:
        collected.append(row)
        row = self.fetchone()
    return collected
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@property
def connection(self):
"""Read-only attribute providing access to the :class:`Connection <phoenixdb.connection.Connection>`
object this cursor was created from."""
return self._connection
@property
def rowcount(self):
"""Read-only attribute specifying the number of rows affected by
the last executed DML statement or -1 if the number cannot be
determined. Note that this will always be set to -1 for select
queries."""
# TODO instead of -1, this ends up being set to Integer.MAX_VALUE
if self._updatecount == MAX_INT:
return -1
return self._updatecount
@property
def rownumber(self):
"""Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can be seen as index of the cursor in a sequence
(the result set). The next fetch operation will fetch the
row indexed by :attr:`rownumber` in that sequence.
"""
if self._frame is not None and self._pos is not None:
return self._frame.offset + self._pos
return self._pos
|
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor._transform_row | python | def _transform_row(self, row):
tmp_row = []
for i, column in enumerate(row.value):
if column.has_array_value:
raise NotImplementedError('array types are not supported')
elif column.scalar_value.null:
tmp_row.append(None)
else:
field_name, rep, mutate_to, cast_from = self._column_data_types[i]
# get the value from the field_name
value = getattr(column.scalar_value, field_name)
# cast the value
if cast_from is not None:
value = cast_from(value)
tmp_row.append(value)
return tmp_row | Transforms a Row into Python values.
:param row:
A ``common_pb2.Row`` object.
:returns:
A list of values casted into the correct Python types.
:raises:
NotImplementedError | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L233-L263 | null | class Cursor(object):
"""Database cursor for executing queries and iterating over results.
You should not construct this object manually, use :meth:`Connection.cursor() <phoenixdb.connection.Connection.cursor>` instead.
"""
arraysize = 1
"""
Read/write attribute specifying the number of rows to fetch
at a time with :meth:`fetchmany`. It defaults to 1 meaning to
fetch a single row at a time.
"""
itersize = 2000
"""
Read/write attribute specifying the number of rows to fetch
from the backend at each network roundtrip during iteration
on the cursor. The default is 2000.
"""
def __init__(self, connection, id=None):
self._connection = connection
self._id = id
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = False
self.arraysize = self.__class__.arraysize
self.itersize = self.__class__.itersize
self._updatecount = -1
def __del__(self):
if not self._connection._closed and not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def __iter__(self):
return self
def __next__(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
next = __next__
def close(self):
"""Closes the cursor.
No further operations are allowed once the cursor is closed.
If the cursor is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the cursor is already closed')
if self._id is not None:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = None
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = True
@property
def closed(self):
"""Read-only attribute specifying if the cursor is closed or not."""
return self._closed
@property
def description(self):
if self._signature is None:
return None
description = []
for column in self._signature.columns:
description.append(ColumnDescription(
column.column_name,
column.type.name,
column.display_size,
None,
column.precision,
column.scale,
None if column.nullable == 2 else bool(column.nullable),
))
return description
def _set_id(self, id):
if self._id is not None and self._id != id:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = id
def _set_signature(self, signature):
self._signature = signature
self._column_data_types = []
self._parameter_data_types = []
if signature is None:
return
for column in signature.columns:
dtype = TypeHelper.from_class(column.column_class_name)
self._column_data_types.append(dtype)
for parameter in signature.parameters:
dtype = TypeHelper.from_class(parameter.class_name)
self._parameter_data_types.append(dtype)
def _set_frame(self, frame):
self._frame = frame
self._pos = None
if frame is not None:
if frame.rows:
self._pos = 0
elif not frame.done:
raise InternalError('got an empty frame, but the statement is not done yet')
def _fetch_next_frame(self):
offset = self._frame.offset + len(self._frame.rows)
frame = self._connection._client.fetch(
self._connection._id, self._id,
offset=offset, frame_max_size=self.itersize)
self._set_frame(frame)
def _process_results(self, results):
if results:
result = results[0]
if result.own_statement:
self._set_id(result.statement_id)
self._set_signature(result.signature if result.HasField('signature') else None)
self._set_frame(result.first_frame if result.HasField('first_frame') else None)
self._updatecount = result.update_count
def _transform_parameters(self, parameters):
typed_parameters = []
for value, data_type in zip(parameters, self._parameter_data_types):
field_name, rep, mutate_to, cast_from = data_type
typed_value = common_pb2.TypedValue()
if value is None:
typed_value.null = True
typed_value.type = common_pb2.NULL
else:
typed_value.null = False
# use the mutator function
if mutate_to is not None:
value = mutate_to(value)
typed_value.type = rep
setattr(typed_value, field_name, value)
typed_parameters.append(typed_value)
return typed_parameters
def execute(self, operation, parameters=None):
if self._closed:
raise ProgrammingError('the cursor is already closed')
self._updatecount = -1
self._set_frame(None)
if parameters is None:
if self._id is None:
self._set_id(self._connection._client.create_statement(self._connection._id))
results = self._connection._client.prepare_and_execute(
self._connection._id, self._id,
operation, first_frame_max_size=self.itersize)
self._process_results(results)
else:
statement = self._connection._client.prepare(
self._connection._id, operation)
self._set_id(statement.id)
self._set_signature(statement.signature)
results = self._connection._client.execute(
self._connection._id, self._id,
statement.signature, self._transform_parameters(parameters),
first_frame_max_size=self.itersize)
self._process_results(results)
def executemany(self, operation, seq_of_parameters):
if self._closed:
raise ProgrammingError('the cursor is already closed')
self._updatecount = -1
self._set_frame(None)
statement = self._connection._client.prepare(
self._connection._id, operation, max_rows_total=0)
self._set_id(statement.id)
self._set_signature(statement.signature)
for parameters in seq_of_parameters:
self._connection._client.execute(
self._connection._id, self._id,
statement.signature, self._transform_parameters(parameters),
first_frame_max_size=0)
def fetchone(self):
if self._frame is None:
raise ProgrammingError('no select statement was executed')
if self._pos is None:
return None
rows = self._frame.rows
row = self._transform_row(rows[self._pos])
self._pos += 1
if self._pos >= len(rows):
self._pos = None
if not self._frame.done:
self._fetch_next_frame()
return row
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
rows = []
while size > 0:
row = self.fetchone()
if row is None:
break
rows.append(row)
size -= 1
return rows
def fetchall(self):
rows = []
while True:
row = self.fetchone()
if row is None:
break
rows.append(row)
return rows
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@property
def connection(self):
"""Read-only attribute providing access to the :class:`Connection <phoenixdb.connection.Connection>`
object this cursor was created from."""
return self._connection
@property
def rowcount(self):
"""Read-only attribute specifying the number of rows affected by
the last executed DML statement or -1 if the number cannot be
determined. Note that this will always be set to -1 for select
queries."""
# TODO instead of -1, this ends up being set to Integer.MAX_VALUE
if self._updatecount == MAX_INT:
return -1
return self._updatecount
@property
def rownumber(self):
"""Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can be seen as index of the cursor in a sequence
(the result set). The next fetch operation will fetch the
row indexed by :attr:`rownumber` in that sequence.
"""
if self._frame is not None and self._pos is not None:
return self._frame.offset + self._pos
return self._pos
|
lalinsky/python-phoenixdb | phoenixdb/cursor.py | Cursor.rownumber | python | def rownumber(self):
if self._frame is not None and self._pos is not None:
return self._frame.offset + self._pos
return self._pos | Read-only attribute providing the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined.
The index can be seen as index of the cursor in a sequence
(the result set). The next fetch operation will fetch the
row indexed by :attr:`rownumber` in that sequence. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/cursor.py#L324-L335 | null | class Cursor(object):
"""Database cursor for executing queries and iterating over results.
You should not construct this object manually, use :meth:`Connection.cursor() <phoenixdb.connection.Connection.cursor>` instead.
"""
arraysize = 1
"""
Read/write attribute specifying the number of rows to fetch
at a time with :meth:`fetchmany`. It defaults to 1 meaning to
fetch a single row at a time.
"""
itersize = 2000
"""
Read/write attribute specifying the number of rows to fetch
from the backend at each network roundtrip during iteration
on the cursor. The default is 2000.
"""
def __init__(self, connection, id=None):
self._connection = connection
self._id = id
self._signature = None
self._column_data_types = []
self._frame = None
self._pos = None
self._closed = False
self.arraysize = self.__class__.arraysize
self.itersize = self.__class__.itersize
self._updatecount = -1
def __del__(self):
if not self._connection._closed and not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def __iter__(self):
return self
def __next__(self):
row = self.fetchone()
if row is None:
raise StopIteration
return row
next = __next__
def close(self):
    """Closes the cursor.

    No further operations are allowed once the cursor is closed.
    If the cursor is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    if self._id is not None:
        # Release the server-side statement before dropping local state.
        self._connection._client.close_statement(self._connection._id, self._id)
        self._id = None
    # Drop cached result-set state so stale frames cannot be read.
    self._signature = None
    self._column_data_types = []
    self._frame = None
    self._pos = None
    self._closed = True
@property
def closed(self):
"""Read-only attribute specifying if the cursor is closed or not."""
return self._closed
@property
def description(self):
if self._signature is None:
return None
description = []
for column in self._signature.columns:
description.append(ColumnDescription(
column.column_name,
column.type.name,
column.display_size,
None,
column.precision,
column.scale,
None if column.nullable == 2 else bool(column.nullable),
))
return description
def _set_id(self, id):
if self._id is not None and self._id != id:
self._connection._client.close_statement(self._connection._id, self._id)
self._id = id
def _set_signature(self, signature):
self._signature = signature
self._column_data_types = []
self._parameter_data_types = []
if signature is None:
return
for column in signature.columns:
dtype = TypeHelper.from_class(column.column_class_name)
self._column_data_types.append(dtype)
for parameter in signature.parameters:
dtype = TypeHelper.from_class(parameter.class_name)
self._parameter_data_types.append(dtype)
def _set_frame(self, frame):
self._frame = frame
self._pos = None
if frame is not None:
if frame.rows:
self._pos = 0
elif not frame.done:
raise InternalError('got an empty frame, but the statement is not done yet')
def _fetch_next_frame(self):
offset = self._frame.offset + len(self._frame.rows)
frame = self._connection._client.fetch(
self._connection._id, self._id,
offset=offset, frame_max_size=self.itersize)
self._set_frame(frame)
def _process_results(self, results):
if results:
result = results[0]
if result.own_statement:
self._set_id(result.statement_id)
self._set_signature(result.signature if result.HasField('signature') else None)
self._set_frame(result.first_frame if result.HasField('first_frame') else None)
self._updatecount = result.update_count
def _transform_parameters(self, parameters):
typed_parameters = []
for value, data_type in zip(parameters, self._parameter_data_types):
field_name, rep, mutate_to, cast_from = data_type
typed_value = common_pb2.TypedValue()
if value is None:
typed_value.null = True
typed_value.type = common_pb2.NULL
else:
typed_value.null = False
# use the mutator function
if mutate_to is not None:
value = mutate_to(value)
typed_value.type = rep
setattr(typed_value, field_name, value)
typed_parameters.append(typed_value)
return typed_parameters
def execute(self, operation, parameters=None):
    """Executes a SQL *operation*, optionally binding *parameters*.

    :param operation: SQL statement text.
    :param parameters: optional sequence of bind values; when given the
        statement is prepared first so the server supplies a signature
        used to type the parameters.
    :raises ProgrammingError: if the cursor is already closed.
    """
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    # Reset state from any previous statement before executing a new one.
    self._updatecount = -1
    self._set_frame(None)
    if parameters is None:
        # No bind parameters: reuse (or lazily create) a plain statement
        # and let the server prepare and execute in one round trip.
        if self._id is None:
            self._set_id(self._connection._client.create_statement(self._connection._id))
        results = self._connection._client.prepare_and_execute(
            self._connection._id, self._id,
            operation, first_frame_max_size=self.itersize)
        self._process_results(results)
    else:
        # With bind parameters we must prepare first to obtain the
        # signature needed to type-convert the parameter values.
        statement = self._connection._client.prepare(
            self._connection._id, operation)
        self._set_id(statement.id)
        self._set_signature(statement.signature)
        results = self._connection._client.execute(
            self._connection._id, self._id,
            statement.signature, self._transform_parameters(parameters),
            first_frame_max_size=self.itersize)
        self._process_results(results)
def executemany(self, operation, seq_of_parameters):
    """Prepares *operation* once and executes it for each parameter set.

    :param operation: SQL statement text with bind placeholders.
    :param seq_of_parameters: iterable of parameter sequences.
    :raises ProgrammingError: if the cursor is already closed.
    """
    if self._closed:
        raise ProgrammingError('the cursor is already closed')
    self._updatecount = -1
    self._set_frame(None)
    # NOTE(review): max_rows_total=0 / first_frame_max_size=0 presumably
    # suppress result frames for batch execution — verify against the
    # Avatica client implementation.
    statement = self._connection._client.prepare(
        self._connection._id, operation, max_rows_total=0)
    self._set_id(statement.id)
    self._set_signature(statement.signature)
    for parameters in seq_of_parameters:
        self._connection._client.execute(
            self._connection._id, self._id,
            statement.signature, self._transform_parameters(parameters),
            first_frame_max_size=0)
def _transform_row(self, row):
    """Transforms a Row into Python values.

    :param row:
        A ``common_pb2.Row`` object.

    :returns:
        A list of values casted into the correct Python types.

    :raises:
        NotImplementedError
    """
    tmp_row = []
    # _column_data_types is positionally aligned with the row's columns
    # (both come from the statement signature).
    for i, column in enumerate(row.value):
        if column.has_array_value:
            raise NotImplementedError('array types are not supported')
        elif column.scalar_value.null:
            tmp_row.append(None)
        else:
            field_name, rep, mutate_to, cast_from = self._column_data_types[i]
            # get the value from the field_name
            value = getattr(column.scalar_value, field_name)
            # cast the value from the wire representation to Python
            if cast_from is not None:
                value = cast_from(value)
            tmp_row.append(value)
    return tmp_row
def fetchone(self):
    """Returns the next row of the result set, or ``None`` when exhausted.

    :raises ProgrammingError: if no select statement was executed first.
    """
    if self._frame is None:
        raise ProgrammingError('no select statement was executed')
    if self._pos is None:
        # Position of None means the result set is exhausted.
        return None
    rows = self._frame.rows
    row = self._transform_row(rows[self._pos])
    self._pos += 1
    if self._pos >= len(rows):
        # Current frame consumed: reset the position and, unless this
        # was the final frame, fetch the next one from the server.
        self._pos = None
        if not self._frame.done:
            self._fetch_next_frame()
    return row
def fetchmany(self, size=None):
    """Return the next batch of rows, at most *size* (default ``arraysize``)."""
    remaining = self.arraysize if size is None else size
    batch = []
    while remaining > 0:
        row = self.fetchone()
        if row is None:
            break
        batch.append(row)
        remaining -= 1
    return batch
def fetchall(self):
    """Return every remaining row of the result set as a list."""
    collected = []
    row = self.fetchone()
    while row is not None:
        collected.append(row)
        row = self.fetchone()
    return collected
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@property
def connection(self):
"""Read-only attribute providing access to the :class:`Connection <phoenixdb.connection.Connection>`
object this cursor was created from."""
return self._connection
@property
def rowcount(self):
"""Read-only attribute specifying the number of rows affected by
the last executed DML statement or -1 if the number cannot be
determined. Note that this will always be set to -1 for select
queries."""
# TODO instead of -1, this ends up being set to Integer.MAX_VALUE
if self._updatecount == MAX_INT:
return -1
return self._updatecount
@property
|
lalinsky/python-phoenixdb | phoenixdb/types.py | Timestamp | python | def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second) | Constructs an object holding a datetime/timestamp value. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/types.py#L38-L40 | null | # Copyright 2015 Lukas Lalinsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import datetime
from decimal import Decimal
from phoenixdb.avatica.proto import common_pb2
__all__ = [
'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOLEAN',
'JAVA_CLASSES', 'JAVA_CLASSES_MAP', 'TypeHelper',
]
def Date(year, month, day):
    """Return a ``datetime.date`` holding the given calendar date (DB API 2.0)."""
    return datetime.date(year=year, month=month, day=day)
def Time(hour, minute, second):
    """Return a ``datetime.time`` holding the given wall-clock time (DB API 2.0)."""
    return datetime.time(hour=hour, minute=minute, second=second)
def DateFromTicks(ticks):
    """Return a ``datetime.date`` for the given UNIX timestamp (local time)."""
    year, month, day = time.localtime(ticks)[:3]
    return datetime.date(year, month, day)
def TimeFromTicks(ticks):
    """Return a ``datetime.time`` for the given UNIX timestamp (local time)."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return datetime.time(hour, minute, second)
def TimestampFromTicks(ticks):
    """Return a ``datetime.datetime`` for the given UNIX timestamp (local time)."""
    year, month, day, hour, minute, second = time.localtime(ticks)[:6]
    return datetime.datetime(year, month, day, hour, minute, second)
def Binary(value):
    """Construct a DB API 2.0 binary object by coercing *value* to ``bytes``."""
    return bytes(value)
def time_from_java_sql_time(n):
    """Convert a ``java.sql.Time`` value (ms since midnight) to ``datetime.time``."""
    epoch = datetime.datetime(1970, 1, 1)
    return (epoch + datetime.timedelta(milliseconds=n)).time()
def time_to_java_sql_time(t):
    """Convert a ``datetime.time`` to milliseconds since midnight (java.sql.Time)."""
    whole_seconds = (t.hour * 60 + t.minute) * 60 + t.second
    return whole_seconds * 1000 + t.microsecond // 1000
def date_from_java_sql_date(n):
    """Convert a ``java.sql.Date`` value (days since the epoch) to ``datetime.date``."""
    epoch_day = datetime.date(1970, 1, 1)
    return epoch_day + datetime.timedelta(days=n)
def date_to_java_sql_date(d):
    """Convert a ``date`` (or ``datetime``) to days since the epoch (java.sql.Date)."""
    day = d.date() if isinstance(d, datetime.datetime) else d
    return (day - datetime.date(1970, 1, 1)).days
def datetime_from_java_sql_timestamp(n):
    """Convert a ``java.sql.Timestamp`` (ms since the epoch) to ``datetime.datetime``."""
    epoch = datetime.datetime(1970, 1, 1)
    return epoch + datetime.timedelta(milliseconds=n)
def datetime_to_java_sql_timestamp(d):
    """Convert a ``datetime.datetime`` to ms since the epoch (java.sql.Timestamp)."""
    delta = d - datetime.datetime(1970, 1, 1)
    whole_seconds = delta.days * 24 * 3600 + delta.seconds
    return whole_seconds * 1000 + delta.microseconds // 1000
class ColumnType(object):
    # DB API 2.0 type object: compares equal to any of the type-code
    # strings it groups together (e.g. ``STRING == 'VARCHAR'``).

    def __init__(self, eq_types):
        # eq_types: iterable of SQL type-name strings this object matches.
        self.eq_types = tuple(eq_types)
        self.eq_types_set = set(eq_types)

    def __eq__(self, other):
        # Equal to any member type name.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm that is intended.
        return other in self.eq_types_set

    def __cmp__(self, other):
        # Python 2 comparison fallback; ignored on Python 3.
        if other in self.eq_types_set:
            return 0
        if other < self.eq_types:
            # presumably orders unknown names against the member tuple —
            # semantics inherited from the DB API type-object convention;
            # verify before relying on ordering.
            return 1
        else:
            return -1
STRING = ColumnType(['VARCHAR', 'CHAR'])
"""Type object that can be used to describe string-based columns."""
BINARY = ColumnType(['BINARY', 'VARBINARY'])
"""Type object that can be used to describe (long) binary columns."""
NUMBER = ColumnType([
'INTEGER', 'UNSIGNED_INT', 'BIGINT', 'UNSIGNED_LONG', 'TINYINT', 'UNSIGNED_TINYINT',
'SMALLINT', 'UNSIGNED_SMALLINT', 'FLOAT', 'UNSIGNED_FLOAT', 'DOUBLE', 'UNSIGNED_DOUBLE', 'DECIMAL'
])
"""Type object that can be used to describe numeric columns."""
DATETIME = ColumnType(['TIME', 'DATE', 'TIMESTAMP', 'UNSIGNED_TIME', 'UNSIGNED_DATE', 'UNSIGNED_TIMESTAMP'])
"""Type object that can be used to describe date/time columns."""
ROWID = ColumnType([])
"""Only implemented for DB API 2.0 compatibility, not used."""
BOOLEAN = ColumnType(['BOOLEAN'])
"""Type object that can be used to describe boolean columns. This is a phoenixdb-specific extension."""
# XXX ARRAY
if sys.version_info[0] < 3:
_long = long # noqa: F821
else:
_long = int
JAVA_CLASSES = {
'bool_value': [
('java.lang.Boolean', common_pb2.BOOLEAN, None, None),
],
'string_value': [
('java.lang.Character', common_pb2.CHARACTER, None, None),
('java.lang.String', common_pb2.STRING, None, None),
('java.math.BigDecimal', common_pb2.BIG_DECIMAL, str, Decimal),
],
'number_value': [
('java.lang.Integer', common_pb2.INTEGER, None, int),
('java.lang.Short', common_pb2.SHORT, None, int),
('java.lang.Long', common_pb2.LONG, None, _long),
('java.lang.Byte', common_pb2.BYTE, None, int),
('java.sql.Time', common_pb2.JAVA_SQL_TIME, time_to_java_sql_time, time_from_java_sql_time),
('java.sql.Date', common_pb2.JAVA_SQL_DATE, date_to_java_sql_date, date_from_java_sql_date),
('java.sql.Timestamp', common_pb2.JAVA_SQL_TIMESTAMP, datetime_to_java_sql_timestamp, datetime_from_java_sql_timestamp),
],
'bytes_value': [
('[B', common_pb2.BYTE_STRING, Binary, None),
],
'double_value': [
# if common_pb2.FLOAT is used, incorrect values are sent
('java.lang.Float', common_pb2.DOUBLE, float, float),
('java.lang.Double', common_pb2.DOUBLE, float, float),
]
}
"""Groups of Java classes."""
JAVA_CLASSES_MAP = dict((v[0], (k, v[1], v[2], v[3])) for k in JAVA_CLASSES for v in JAVA_CLASSES[k])
"""Flips the available types to allow for faster lookup by Java class.
This mapping should be structured as:
{
'java.math.BigDecimal': ('string_value', common_pb2.BIG_DECIMAL, str, Decimal),),
...
'<java class>': (<field_name>, <Rep enum>, <mutate_to function>, <cast_from function>),
}
"""
class TypeHelper(object):
    """Lookup helper mapping Java class names to Avatica reps and cast functions."""

    @staticmethod
    def from_class(klass):
        """Look up the serialization recipe for a Java class name.

        :param klass:
            Fully-qualified Java class name of the column or parameter.

        :returns: tuple ``(field_name, rep, mutate_to, cast_from)`` where
            ``field_name`` is the attribute in ``common_pb2.TypedValue``,
            ``rep`` is the ``common_pb2.Rep`` enum value,
            ``mutate_to`` converts Python values to Phoenix values (or ``None``),
            ``cast_from`` converts Phoenix values to Python values (or ``None``).

        :raises NotImplementedError: if the class is not supported.
        """
        if klass in JAVA_CLASSES_MAP:
            return JAVA_CLASSES_MAP[klass]
        raise NotImplementedError('type {} is not supported'.format(klass))
|
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.open | python | def open(self):
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args) | Opens the connection. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L70-L73 | null | class Connection(object):
"""Database connection.
You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
"""
cursor_factory = None
"""
The default cursor factory used by :meth:`cursor` if the parameter is not specified.
"""
def __init__(self, client, cursor_factory=None, **kwargs):
self._client = client
self._closed = False
if cursor_factory is not None:
self.cursor_factory = cursor_factory
else:
self.cursor_factory = Cursor
self._cursors = []
# Extract properties to pass to OpenConnectionRequest
self._connection_args = {}
# The rest of the kwargs
self._filtered_args = {}
for k in kwargs:
if k in OPEN_CONNECTION_PROPERTIES:
self._connection_args[k] = kwargs[k]
else:
self._filtered_args[k] = kwargs[k]
self.open()
self.set_session(**self._filtered_args)
def __del__(self):
if not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def close(self):
    """Closes the connection.

    No further operations are allowed, either on the connection or any
    of its cursors, once the connection is closed.
    If the connection is used in a ``with`` statement, this method will
    be automatically called at the end of the ``with`` block.
    """
    if self._closed:
        raise ProgrammingError('the connection is already closed')
    # _cursors holds weak references; dereference each and skip entries
    # whose cursor was already garbage-collected or closed.
    for cursor_ref in self._cursors:
        cursor = cursor_ref()
        if cursor is not None and not cursor._closed:
            cursor.close()
    # Tear down the server-side connection, then the transport itself.
    self._client.close_connection(self._id)
    self._client.close()
    self._closed = True
@property
def closed(self):
"""Read-only attribute specifying if the connection is closed or not."""
return self._closed
def commit(self):
"""Commits pending database changes.
Currently, this does nothing, because the RPC does not support
transactions. Only defined for DB API 2.0 compatibility.
You need to use :attr:`autocommit` mode.
"""
# TODO can support be added for this?
if self._closed:
raise ProgrammingError('the connection is already closed')
def cursor(self, cursor_factory=None):
"""Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor
def set_session(self, autocommit=None, readonly=None):
    """Sets one or more parameters in the current connection.

    :param autocommit:
        Switch the connection to autocommit mode. With the current
        version, you need to always enable this, because
        :meth:`commit` is not implemented.

    :param readonly:
        Switch the connection to read-only mode.
    """
    # Only send the properties the caller actually supplied, so
    # unspecified settings are left untouched on the server.
    props = {}
    if autocommit is not None:
        props['autoCommit'] = bool(autocommit)
    if readonly is not None:
        props['readOnly'] = bool(readonly)
    # connection_sync returns the authoritative server-side state;
    # cache it locally for the corresponding properties.
    props = self._client.connection_sync(self._id, props)
    self._autocommit = props.auto_commit
    self._readonly = props.read_only
    self._transactionisolation = props.transaction_isolation
@property
def autocommit(self):
"""Read/write attribute for switching the connection's autocommit mode."""
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'autoCommit': bool(value)})
self._autocommit = props.auto_commit
@property
def readonly(self):
"""Read/write attribute for switching the connection's readonly mode."""
return self._readonly
@readonly.setter
def readonly(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'readOnly': bool(value)})
self._readonly = props.read_only
@property
def transactionisolation(self):
return self._transactionisolation
@transactionisolation.setter
def transactionisolation(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'transactionIsolation': bool(value)})
self._transactionisolation = props.transaction_isolation
|
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.close | python | def close(self):
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
self._client.close()
self._closed = True | Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L75-L91 | null | class Connection(object):
"""Database connection.
You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
"""
cursor_factory = None
"""
The default cursor factory used by :meth:`cursor` if the parameter is not specified.
"""
def __init__(self, client, cursor_factory=None, **kwargs):
self._client = client
self._closed = False
if cursor_factory is not None:
self.cursor_factory = cursor_factory
else:
self.cursor_factory = Cursor
self._cursors = []
# Extract properties to pass to OpenConnectionRequest
self._connection_args = {}
# The rest of the kwargs
self._filtered_args = {}
for k in kwargs:
if k in OPEN_CONNECTION_PROPERTIES:
self._connection_args[k] = kwargs[k]
else:
self._filtered_args[k] = kwargs[k]
self.open()
self.set_session(**self._filtered_args)
def __del__(self):
if not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def open(self):
"""Opens the connection."""
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args)
@property
def closed(self):
"""Read-only attribute specifying if the connection is closed or not."""
return self._closed
def commit(self):
"""Commits pending database changes.
Currently, this does nothing, because the RPC does not support
transactions. Only defined for DB API 2.0 compatibility.
You need to use :attr:`autocommit` mode.
"""
# TODO can support be added for this?
if self._closed:
raise ProgrammingError('the connection is already closed')
def cursor(self, cursor_factory=None):
"""Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor
def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode.
"""
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation
@property
def autocommit(self):
"""Read/write attribute for switching the connection's autocommit mode."""
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'autoCommit': bool(value)})
self._autocommit = props.auto_commit
@property
def readonly(self):
"""Read/write attribute for switching the connection's readonly mode."""
return self._readonly
@readonly.setter
def readonly(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'readOnly': bool(value)})
self._readonly = props.read_only
@property
def transactionisolation(self):
return self._transactionisolation
@transactionisolation.setter
def transactionisolation(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'transactionIsolation': bool(value)})
self._transactionisolation = props.transaction_isolation
|
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.cursor | python | def cursor(self, cursor_factory=None):
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor | Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L109-L126 | null | class Connection(object):
"""Database connection.
You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
"""
cursor_factory = None
"""
The default cursor factory used by :meth:`cursor` if the parameter is not specified.
"""
def __init__(self, client, cursor_factory=None, **kwargs):
self._client = client
self._closed = False
if cursor_factory is not None:
self.cursor_factory = cursor_factory
else:
self.cursor_factory = Cursor
self._cursors = []
# Extract properties to pass to OpenConnectionRequest
self._connection_args = {}
# The rest of the kwargs
self._filtered_args = {}
for k in kwargs:
if k in OPEN_CONNECTION_PROPERTIES:
self._connection_args[k] = kwargs[k]
else:
self._filtered_args[k] = kwargs[k]
self.open()
self.set_session(**self._filtered_args)
def __del__(self):
if not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def open(self):
"""Opens the connection."""
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args)
def close(self):
"""Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
self._client.close()
self._closed = True
@property
def closed(self):
"""Read-only attribute specifying if the connection is closed or not."""
return self._closed
def commit(self):
"""Commits pending database changes.
Currently, this does nothing, because the RPC does not support
transactions. Only defined for DB API 2.0 compatibility.
You need to use :attr:`autocommit` mode.
"""
# TODO can support be added for this?
if self._closed:
raise ProgrammingError('the connection is already closed')
def set_session(self, autocommit=None, readonly=None):
"""Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode.
"""
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation
@property
def autocommit(self):
"""Read/write attribute for switching the connection's autocommit mode."""
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'autoCommit': bool(value)})
self._autocommit = props.auto_commit
@property
def readonly(self):
"""Read/write attribute for switching the connection's readonly mode."""
return self._readonly
@readonly.setter
def readonly(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'readOnly': bool(value)})
self._readonly = props.read_only
@property
def transactionisolation(self):
return self._transactionisolation
@transactionisolation.setter
def transactionisolation(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'transactionIsolation': bool(value)})
self._transactionisolation = props.transaction_isolation
|
lalinsky/python-phoenixdb | phoenixdb/connection.py | Connection.set_session | python | def set_session(self, autocommit=None, readonly=None):
props = {}
if autocommit is not None:
props['autoCommit'] = bool(autocommit)
if readonly is not None:
props['readOnly'] = bool(readonly)
props = self._client.connection_sync(self._id, props)
self._autocommit = props.auto_commit
self._readonly = props.read_only
self._transactionisolation = props.transaction_isolation | Sets one or more parameters in the current connection.
:param autocommit:
Switch the connection to autocommit mode. With the current
version, you need to always enable this, because
:meth:`commit` is not implemented.
:param readonly:
Switch the connection to read-only mode. | train | https://github.com/lalinsky/python-phoenixdb/blob/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/connection.py#L128-L147 | null | class Connection(object):
"""Database connection.
You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
"""
cursor_factory = None
"""
The default cursor factory used by :meth:`cursor` if the parameter is not specified.
"""
def __init__(self, client, cursor_factory=None, **kwargs):
self._client = client
self._closed = False
if cursor_factory is not None:
self.cursor_factory = cursor_factory
else:
self.cursor_factory = Cursor
self._cursors = []
# Extract properties to pass to OpenConnectionRequest
self._connection_args = {}
# The rest of the kwargs
self._filtered_args = {}
for k in kwargs:
if k in OPEN_CONNECTION_PROPERTIES:
self._connection_args[k] = kwargs[k]
else:
self._filtered_args[k] = kwargs[k]
self.open()
self.set_session(**self._filtered_args)
def __del__(self):
if not self._closed:
self.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._closed:
self.close()
def open(self):
"""Opens the connection."""
self._id = str(uuid.uuid4())
self._client.open_connection(self._id, info=self._connection_args)
def close(self):
"""Closes the connection.
No further operations are allowed, either on the connection or any
of its cursors, once the connection is closed.
If the connection is used in a ``with`` statement, this method will
be automatically called at the end of the ``with`` block.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
for cursor_ref in self._cursors:
cursor = cursor_ref()
if cursor is not None and not cursor._closed:
cursor.close()
self._client.close_connection(self._id)
self._client.close()
self._closed = True
@property
def closed(self):
"""Read-only attribute specifying if the connection is closed or not."""
return self._closed
def commit(self):
"""Commits pending database changes.
Currently, this does nothing, because the RPC does not support
transactions. Only defined for DB API 2.0 compatibility.
You need to use :attr:`autocommit` mode.
"""
# TODO can support be added for this?
if self._closed:
raise ProgrammingError('the connection is already closed')
def cursor(self, cursor_factory=None):
"""Creates a new cursor.
:param cursor_factory:
This argument can be used to create non-standard cursors.
The class returned must be a subclass of
:class:`~phoenixdb.cursor.Cursor` (for example :class:`~phoenixdb.cursor.DictCursor`).
A default factory for the connection can also be specified using the
:attr:`cursor_factory` attribute.
:returns:
A :class:`~phoenixdb.cursor.Cursor` object.
"""
if self._closed:
raise ProgrammingError('the connection is already closed')
cursor = (cursor_factory or self.cursor_factory)(self)
self._cursors.append(weakref.ref(cursor, self._cursors.remove))
return cursor
@property
def autocommit(self):
"""Read/write attribute for switching the connection's autocommit mode."""
return self._autocommit
@autocommit.setter
def autocommit(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'autoCommit': bool(value)})
self._autocommit = props.auto_commit
@property
def readonly(self):
"""Read/write attribute for switching the connection's readonly mode."""
return self._readonly
@readonly.setter
def readonly(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'readOnly': bool(value)})
self._readonly = props.read_only
@property
def transactionisolation(self):
return self._transactionisolation
@transactionisolation.setter
def transactionisolation(self, value):
if self._closed:
raise ProgrammingError('the connection is already closed')
props = self._client.connection_sync(self._id, {'transactionIsolation': bool(value)})
self._transactionisolation = props.transaction_isolation
|
VisTrails/tej | tej/submission.py | _unique_names | python | def _unique_names():
characters = ("abcdefghijklmnopqrstuvwxyz"
"0123456789")
characters = [characters[i:i + 1] for i in irange(len(characters))]
rng = random.Random()
while True:
letters = [rng.choice(characters) for i in irange(10)]
yield ''.join(letters) | Generates unique sequences of bytes. | train | https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L31-L40 | null | from __future__ import absolute_import, division, unicode_literals
import getpass
import logging
import paramiko
import pkg_resources
import random
import re
from rpaths import PosixPath, Path
import scp
import select
import socket
from tej.errors import InvalidDestination, QueueDoesntExist, \
QueueLinkBroken, QueueExists, JobAlreadyExists, JobNotFound, \
JobStillRunning, RemoteCommandFailure
from tej.utils import unicode_, string_types, iteritems, irange, shell_escape
__all__ = ['DEFAULT_TEJ_DIR',
'parse_ssh_destination', 'destination_as_string',
'ServerLogger', 'RemoteQueue']
DEFAULT_TEJ_DIR = '~/.tej'
logger = logging.getLogger('tej')
_unique_names = _unique_names()
def make_unique_name():
"""Makes a unique (random) string.
"""
return next(_unique_names)
def escape_queue(s):
"""Escapes the path to a queue, e.g. preserves ~ at the begining.
"""
if isinstance(s, PosixPath):
s = unicode_(s)
elif isinstance(s, bytes):
s = s.decode('utf-8')
if s.startswith('~/'):
return '~/' + shell_escape(s[2:])
else:
return shell_escape(s)
_re_ssh = re.compile(r'^'
r'(?:ssh://)?' # 'ssh://' prefix
r'(?:([a-zA-Z0-9_.-]+)' # 'user@'
r'(?::([^ @]+))?' # ':password'
r'@)?' # '@'
r'([a-zA-Z0-9_.-]+)' # 'host'
r'(?::([0-9]+))?' # ':port'
r'$')
def parse_ssh_destination(destination):
"""Parses the SSH destination argument.
"""
match = _re_ssh.match(destination)
if not match:
raise InvalidDestination("Invalid destination: %s" % destination)
user, password, host, port = match.groups()
info = {}
if user:
info['username'] = user
else:
info['username'] = getpass.getuser()
if password:
info['password'] = password
if port:
info['port'] = int(port)
info['hostname'] = host
return info
def destination_as_string(destination):
if 'password' in destination:
user = '%s:%s' % (destination['username'], destination['password'])
else:
user = destination['username']
if destination.get('port', 22) != 22:
return 'ssh://%s@%s:%d' % (user,
destination['hostname'],
destination['port'])
else:
return 'ssh://%s@%s' % (user,
destination['hostname'])
class ServerLogger(object):
"""Adapter getting bytes from the server's output and handing them to log.
"""
logger = logging.getLogger('tej.server')
def __init__(self):
self.data = []
def append(self, data):
self.data.append(data)
def done(self):
if self.data:
data = b''.join(self.data)
data = data.decode('utf-8', 'replace')
data = data.rstrip()
self.message(data)
self.data = []
def message(self, data):
self.logger.info(data)
JOB_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz" \
"0123456789_-+=@%:.,"
def check_jobid(job_id):
if not all(c in JOB_ID_CHARS for c in job_id):
raise ValueError("Invalid job identifier")
class RemoteQueue(object):
JOB_DONE = 'finished'
JOB_RUNNING = 'running'
JOB_INCOMPLETE = 'incomplete'
JOB_CREATED = 'created'
PROTOCOL_VERSION = 0, 2
def __init__(self, destination, queue,
setup_runtime=None, need_runtime=None):
"""Creates a queue object, that represents a job queue on a server.
:param destination: The address of the server, used to SSH into it.
:param queue: The pathname of the queue on the remote server. Something
like "~/.tej" is usually adequate. This will contain both the job info
and files, and the scripts used to manage it on the server side.
:param setup_runtime: The name of the runtime to deploy on the server
if the queue doesn't already exist. If None (default), it will
auto-detect what is appropriate (currently, `pbs` if the ``qsub``
command is available), and fallback on `default`. If `need_runtime` is
set, this should be one of the accepted values.
:param need_runtime: A list of runtime names that are acceptable. If
the queue already exists on the server and this argument is not None,
the installed runtime will be matched against it, and a failure will be
reported if it is not one of the provided values.
"""
if isinstance(destination, string_types):
self.destination = parse_ssh_destination(destination)
else:
if 'hostname' not in destination:
raise InvalidDestination("destination dictionary is missing "
"hostname")
self.destination = destination
if setup_runtime not in (None, 'default', 'pbs'):
raise ValueError("Selected runtime %r is unknown" % setup_runtime)
self.setup_runtime = setup_runtime
if need_runtime is not None:
self.need_runtime = set(need_runtime)
else:
self.need_runtime = None
self.queue = PosixPath(queue)
self._queue = None
self._ssh = None
self._connect()
def server_logger(self):
"""Handles messages from the server.
    By default, uses getLogger('tej.server').info(). Override this in
subclasses to provide your own mechanism.
"""
return ServerLogger()
@property
def destination_string(self):
return destination_as_string(self.destination)
def _ssh_client(self):
"""Gets an SSH client to connect with.
"""
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
return ssh
def _connect(self):
"""Connects via SSH.
"""
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
"""
if self._ssh is None:
self._connect()
return self._ssh
else:
try:
chan = self._ssh.get_transport().open_session()
except (socket.error, paramiko.SSHException):
logger.warning("Lost connection, reconnecting...")
self._ssh.close()
self._connect()
else:
chan.close()
return self._ssh
def get_scp_client(self):
return scp.SCPClient(self.get_client().get_transport())
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close()
def check_call(self, cmd):
"""Calls a command through SSH.
"""
ret, _ = self._call(cmd, False)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
"""
if depth == 0:
logger.debug("resolve_queue(%s)", queue)
answer = self.check_output(
'if [ -d %(queue)s ]; then '
' cd %(queue)s; echo "dir"; cat version; pwd; '
'elif [ -f %(queue)s ]; then '
' cat %(queue)s; '
'else '
' echo no; '
'fi' % {
'queue': escape_queue(queue)})
if answer == b'no':
if depth > 0:
logger.debug("Broken link at depth=%d", depth)
else:
logger.debug("Path doesn't exist")
return None, depth
elif answer.startswith(b'dir\n'):
version, runtime, path = answer[4:].split(b'\n', 2)
try:
version = tuple(int(e)
for e in version.decode('ascii', 'ignore')
.split('.'))
except ValueError:
version = 0, 0
if version[:2] != self.PROTOCOL_VERSION:
raise QueueExists(
msg="Queue exists and is using incompatible protocol "
"version %s" % '.'.join('%s' % e for e in version))
path = PosixPath(path)
runtime = runtime.decode('ascii', 'replace')
if self.need_runtime is not None:
if (self.need_runtime is not None and
runtime not in self.need_runtime):
raise QueueExists(
msg="Queue exists and is using explicitely disallowed "
"runtime %s" % runtime)
logger.debug("Found directory at %s, depth=%d, runtime=%s",
path, depth, runtime)
return path, depth
elif answer.startswith(b'tejdir: '):
new = queue.parent / answer[8:]
logger.debug("Found link to %s, recursing", new)
if links is not None:
links.append(queue)
return self._resolve_queue(new, depth + 1)
else: # pragma: no cover
logger.debug("Server returned %r", answer)
raise RemoteCommandFailure(msg="Queue resolution command failed "
"in unexpected way")
def _get_queue(self):
"""Gets the actual location of the queue, or None.
"""
if self._queue is None:
self._links = []
queue, depth = self._resolve_queue(self.queue, links=self._links)
if queue is None and depth > 0:
raise QueueLinkBroken
self._queue = queue
return self._queue
def setup(self, links=None, force=False, only_links=False):
"""Installs the runtime at the target location.
This will not replace an existing installation, unless `force` is True.
After installation, creates links to this installation at the specified
locations.
"""
if not links:
links = []
if only_links:
logger.info("Only creating links")
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(self.queue),
'link': escape_queue(link)})
return
queue, depth = self._resolve_queue(self.queue)
if queue is not None or depth > 0:
if force:
if queue is None:
logger.info("Replacing broken link")
elif depth > 0:
logger.info("Replacing link to %s...", queue)
else:
logger.info("Replacing existing queue...")
self.check_call('rm -Rf %s' % escape_queue(self.queue))
else:
if queue is not None and depth > 0:
raise QueueExists("Queue already exists (links to %s)\n"
"Use --force to replace" % queue)
elif depth > 0:
raise QueueExists("Broken link exists\n"
"Use --force to replace")
else:
raise QueueExists("Queue already exists\n"
"Use --force to replace")
queue = self._setup()
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(queue),
'link': escape_queue(link)})
def _setup(self):
"""Actually installs the runtime.
"""
# Expands ~user in queue
if self.queue.path[0:1] == b'/':
queue = self.queue
else:
if self.queue.path[0:1] == b'~':
output = self.check_output('echo %s' %
escape_queue(self.queue))
queue = PosixPath(output.rstrip(b'\r\n'))
else:
output = self.check_output('pwd')
queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
logger.debug("Resolved to %s", queue)
# Select runtime
if not self.setup_runtime:
# Autoselect
if self._call('which qsub', False)[0] == 0:
logger.debug("qsub is available, using runtime 'pbs'")
runtime = 'pbs'
else:
logger.debug("qsub not found, using runtime 'default'")
runtime = 'default'
else:
runtime = self.setup_runtime
if self.need_runtime is not None and runtime not in self.need_runtime:
raise ValueError("About to setup runtime %s but that wouldn't "
"match explicitely allowed runtimes" % runtime)
logger.info("Installing runtime %s%s at %s",
runtime,
"" if self.setup_runtime else " (auto)",
self.queue)
# Uploads runtime
scp_client = self.get_scp_client()
filename = pkg_resources.resource_filename('tej',
'remotes/%s' % runtime)
scp_client.put(filename, str(queue), recursive=True)
logger.debug("Files uploaded")
# Runs post-setup script
self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
logger.debug("Post-setup script done")
self._queue = queue
return queue
def submit(self, job_id, directory, script=None):
"""Submits a job to the queue.
If the runtime is not there, it will be installed. If it is a broken
chain of links, error.
"""
if job_id is None:
job_id = '%s_%s_%s' % (Path(directory).unicodename,
self.destination['username'],
make_unique_name())
else:
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
queue = self._setup()
if script is None:
script = 'start.sh'
# Create directory
ret, target = self._call('%s %s' % (
shell_escape(queue / 'commands/new_job'),
job_id),
True)
if ret == 4:
raise JobAlreadyExists
elif ret != 0:
raise JobNotFound("Couldn't create job")
target = PosixPath(target)
logger.debug("Server created directory %s", target)
# Upload to directory
try:
scp_client = self.get_scp_client()
scp_client.put(str(Path(directory)),
str(target),
recursive=True)
except BaseException as e:
try:
self.delete(job_id)
except BaseException:
raise e
raise
logger.debug("Files uploaded")
# Submit job
self.check_call('%s %s %s %s' % (
shell_escape(queue / 'commands/submit'),
job_id, shell_escape(target),
shell_escape(script)))
logger.info("Submitted job %s", job_id)
return job_id
def status(self, job_id):
"""Gets the status of a previously-submitted job.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/status'),
job_id),
True)
if ret == 0:
directory, result = output.splitlines()
result = result.decode('utf-8')
return RemoteQueue.JOB_DONE, PosixPath(directory), result
elif ret == 2:
directory = output.splitlines()[0]
return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
elif ret == 3:
raise JobNotFound
else:
raise RemoteCommandFailure(command="commands/status",
ret=ret)
def download(self, job_id, files, **kwargs):
"""Downloads files from server.
"""
check_jobid(job_id)
if not files:
return
if isinstance(files, string_types):
files = [files]
directory = False
recursive = kwargs.pop('recursive', True)
if 'destination' in kwargs and 'directory' in kwargs:
raise TypeError("Only use one of 'destination' or 'directory'")
elif 'destination' in kwargs:
destination = Path(kwargs.pop('destination'))
if len(files) != 1:
raise ValueError("'destination' specified but multiple files "
"given; did you mean to use 'directory'?")
elif 'directory' in kwargs:
destination = Path(kwargs.pop('directory'))
directory = True
if kwargs:
raise TypeError("Got unexpected keyword arguments")
# Might raise JobNotFound
status, target, result = self.status(job_id)
scp_client = self.get_scp_client()
for filename in files:
logger.info("Downloading %s", target / filename)
if directory:
scp_client.get(str(target / filename),
str(destination / filename),
recursive=recursive)
else:
scp_client.get(str(target / filename),
str(destination),
recursive=recursive)
def kill(self, job_id):
"""Kills a job on the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/kill'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret != 0:
raise RemoteCommandFailure(command='commands/kill',
ret=ret)
def delete(self, job_id):
"""Deletes a job from the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/delete'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret == 2:
raise JobStillRunning
elif ret != 0:
raise RemoteCommandFailure(command='commands/delete',
ret=ret)
def list(self):
"""Lists the jobs on the server.
"""
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
output = self.check_output('%s' %
shell_escape(queue / 'commands/list'))
job_id, info = None, None
for line in output.splitlines():
line = line.decode('utf-8')
if line.startswith(' '):
key, value = line[4:].split(': ', 1)
info[key] = value
else:
if job_id is not None:
yield job_id, info
job_id = line
info = {}
if job_id is not None:
yield job_id, info
def cleanup(self, kill=False):
queue = self._get_queue()
if queue is not None:
# Kill jobs
for job_id, info in self.list():
if info['status'] == 'running':
if not kill:
raise JobStillRunning("Can't cleanup, some jobs are "
"still running")
else:
logger.info("Killing running job %s", job_id)
self.kill(job_id)
# Remove queue
logger.info("Removing queue at %s", queue)
self.check_call('rm -rf -- %s' % shell_escape(queue))
# Remove links
for link in self._links:
self.check_call('rm -rf -- %s' % shell_escape(link))
return True
|
def escape_queue(s):
    """Shell-escape a queue path while keeping a leading ``~/`` usable.

    Accepts a ``PosixPath``, ``bytes``, or text value.  A ``~/`` prefix
    is emitted outside the quoted part so the remote shell still expands
    it to the user's home directory.
    """
    if isinstance(s, PosixPath):
        text = unicode_(s)
    elif isinstance(s, bytes):
        text = s.decode('utf-8')
    else:
        text = s
    if not text.startswith('~/'):
        return shell_escape(text)
    return '~/' + shell_escape(text[2:])
"def shell_escape(s):\n r\"\"\"Given bl\"a, returns \"bl\\\\\"a\".\n \"\"\"\n if isinstance(s, PosixPath):\n s = unicode_(s)\n elif isinstance(s, bytes):\n s = s.decode('utf-8')\n if not s or any(c not in safe_shell_chars for c in s):\n return '\"%s\"' % (s.replace('\\\\', '\\\\\\\\')\n .replace('\"', '\\\\\"')\n .replace('`', '\\\\`')\n .replace('$', '\\\\$'))\n else:\n return s\n"
] | from __future__ import absolute_import, division, unicode_literals
import getpass
import logging
import paramiko
import pkg_resources
import random
import re
from rpaths import PosixPath, Path
import scp
import select
import socket
from tej.errors import InvalidDestination, QueueDoesntExist, \
QueueLinkBroken, QueueExists, JobAlreadyExists, JobNotFound, \
JobStillRunning, RemoteCommandFailure
from tej.utils import unicode_, string_types, iteritems, irange, shell_escape
__all__ = ['DEFAULT_TEJ_DIR',
'parse_ssh_destination', 'destination_as_string',
'ServerLogger', 'RemoteQueue']
DEFAULT_TEJ_DIR = '~/.tej'
logger = logging.getLogger('tej')
def _unique_names():
"""Generates unique sequences of bytes.
"""
characters = ("abcdefghijklmnopqrstuvwxyz"
"0123456789")
characters = [characters[i:i + 1] for i in irange(len(characters))]
rng = random.Random()
while True:
letters = [rng.choice(characters) for i in irange(10)]
yield ''.join(letters)
_unique_names = _unique_names()
def make_unique_name():
"""Makes a unique (random) string.
"""
return next(_unique_names)
_re_ssh = re.compile(r'^'
r'(?:ssh://)?' # 'ssh://' prefix
r'(?:([a-zA-Z0-9_.-]+)' # 'user@'
r'(?::([^ @]+))?' # ':password'
r'@)?' # '@'
r'([a-zA-Z0-9_.-]+)' # 'host'
r'(?::([0-9]+))?' # ':port'
r'$')
def parse_ssh_destination(destination):
"""Parses the SSH destination argument.
"""
match = _re_ssh.match(destination)
if not match:
raise InvalidDestination("Invalid destination: %s" % destination)
user, password, host, port = match.groups()
info = {}
if user:
info['username'] = user
else:
info['username'] = getpass.getuser()
if password:
info['password'] = password
if port:
info['port'] = int(port)
info['hostname'] = host
return info
def destination_as_string(destination):
if 'password' in destination:
user = '%s:%s' % (destination['username'], destination['password'])
else:
user = destination['username']
if destination.get('port', 22) != 22:
return 'ssh://%s@%s:%d' % (user,
destination['hostname'],
destination['port'])
else:
return 'ssh://%s@%s' % (user,
destination['hostname'])
class ServerLogger(object):
"""Adapter getting bytes from the server's output and handing them to log.
"""
logger = logging.getLogger('tej.server')
def __init__(self):
self.data = []
def append(self, data):
self.data.append(data)
def done(self):
if self.data:
data = b''.join(self.data)
data = data.decode('utf-8', 'replace')
data = data.rstrip()
self.message(data)
self.data = []
def message(self, data):
self.logger.info(data)
JOB_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz" \
"0123456789_-+=@%:.,"
def check_jobid(job_id):
if not all(c in JOB_ID_CHARS for c in job_id):
raise ValueError("Invalid job identifier")
class RemoteQueue(object):
JOB_DONE = 'finished'
JOB_RUNNING = 'running'
JOB_INCOMPLETE = 'incomplete'
JOB_CREATED = 'created'
PROTOCOL_VERSION = 0, 2
def __init__(self, destination, queue,
setup_runtime=None, need_runtime=None):
"""Creates a queue object, that represents a job queue on a server.
:param destination: The address of the server, used to SSH into it.
:param queue: The pathname of the queue on the remote server. Something
like "~/.tej" is usually adequate. This will contain both the job info
and files, and the scripts used to manage it on the server side.
:param setup_runtime: The name of the runtime to deploy on the server
if the queue doesn't already exist. If None (default), it will
auto-detect what is appropriate (currently, `pbs` if the ``qsub``
command is available), and fallback on `default`. If `need_runtime` is
set, this should be one of the accepted values.
:param need_runtime: A list of runtime names that are acceptable. If
the queue already exists on the server and this argument is not None,
the installed runtime will be matched against it, and a failure will be
reported if it is not one of the provided values.
"""
if isinstance(destination, string_types):
self.destination = parse_ssh_destination(destination)
else:
if 'hostname' not in destination:
raise InvalidDestination("destination dictionary is missing "
"hostname")
self.destination = destination
if setup_runtime not in (None, 'default', 'pbs'):
raise ValueError("Selected runtime %r is unknown" % setup_runtime)
self.setup_runtime = setup_runtime
if need_runtime is not None:
self.need_runtime = set(need_runtime)
else:
self.need_runtime = None
self.queue = PosixPath(queue)
self._queue = None
self._ssh = None
self._connect()
def server_logger(self):
"""Handles messages from the server.
    By default, uses getLogger('tej.server').info(). Override this in
subclasses to provide your own mechanism.
"""
return ServerLogger()
@property
def destination_string(self):
return destination_as_string(self.destination)
def _ssh_client(self):
"""Gets an SSH client to connect with.
"""
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
return ssh
def _connect(self):
"""Connects via SSH.
"""
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
"""
if self._ssh is None:
self._connect()
return self._ssh
else:
try:
chan = self._ssh.get_transport().open_session()
except (socket.error, paramiko.SSHException):
logger.warning("Lost connection, reconnecting...")
self._ssh.close()
self._connect()
else:
chan.close()
return self._ssh
def get_scp_client(self):
return scp.SCPClient(self.get_client().get_transport())
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close()
def check_call(self, cmd):
"""Calls a command through SSH.
"""
ret, _ = self._call(cmd, False)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
"""
if depth == 0:
logger.debug("resolve_queue(%s)", queue)
answer = self.check_output(
'if [ -d %(queue)s ]; then '
' cd %(queue)s; echo "dir"; cat version; pwd; '
'elif [ -f %(queue)s ]; then '
' cat %(queue)s; '
'else '
' echo no; '
'fi' % {
'queue': escape_queue(queue)})
if answer == b'no':
if depth > 0:
logger.debug("Broken link at depth=%d", depth)
else:
logger.debug("Path doesn't exist")
return None, depth
elif answer.startswith(b'dir\n'):
version, runtime, path = answer[4:].split(b'\n', 2)
try:
version = tuple(int(e)
for e in version.decode('ascii', 'ignore')
.split('.'))
except ValueError:
version = 0, 0
if version[:2] != self.PROTOCOL_VERSION:
raise QueueExists(
msg="Queue exists and is using incompatible protocol "
"version %s" % '.'.join('%s' % e for e in version))
path = PosixPath(path)
runtime = runtime.decode('ascii', 'replace')
if self.need_runtime is not None:
if (self.need_runtime is not None and
runtime not in self.need_runtime):
raise QueueExists(
msg="Queue exists and is using explicitely disallowed "
"runtime %s" % runtime)
logger.debug("Found directory at %s, depth=%d, runtime=%s",
path, depth, runtime)
return path, depth
elif answer.startswith(b'tejdir: '):
new = queue.parent / answer[8:]
logger.debug("Found link to %s, recursing", new)
if links is not None:
links.append(queue)
return self._resolve_queue(new, depth + 1)
else: # pragma: no cover
logger.debug("Server returned %r", answer)
raise RemoteCommandFailure(msg="Queue resolution command failed "
"in unexpected way")
def _get_queue(self):
"""Gets the actual location of the queue, or None.
"""
if self._queue is None:
self._links = []
queue, depth = self._resolve_queue(self.queue, links=self._links)
if queue is None and depth > 0:
raise QueueLinkBroken
self._queue = queue
return self._queue
def setup(self, links=None, force=False, only_links=False):
"""Installs the runtime at the target location.
This will not replace an existing installation, unless `force` is True.
After installation, creates links to this installation at the specified
locations.
"""
if not links:
links = []
if only_links:
logger.info("Only creating links")
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(self.queue),
'link': escape_queue(link)})
return
queue, depth = self._resolve_queue(self.queue)
if queue is not None or depth > 0:
if force:
if queue is None:
logger.info("Replacing broken link")
elif depth > 0:
logger.info("Replacing link to %s...", queue)
else:
logger.info("Replacing existing queue...")
self.check_call('rm -Rf %s' % escape_queue(self.queue))
else:
if queue is not None and depth > 0:
raise QueueExists("Queue already exists (links to %s)\n"
"Use --force to replace" % queue)
elif depth > 0:
raise QueueExists("Broken link exists\n"
"Use --force to replace")
else:
raise QueueExists("Queue already exists\n"
"Use --force to replace")
queue = self._setup()
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(queue),
'link': escape_queue(link)})
def _setup(self):
"""Actually installs the runtime.
"""
# Expands ~user in queue
if self.queue.path[0:1] == b'/':
queue = self.queue
else:
if self.queue.path[0:1] == b'~':
output = self.check_output('echo %s' %
escape_queue(self.queue))
queue = PosixPath(output.rstrip(b'\r\n'))
else:
output = self.check_output('pwd')
queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
logger.debug("Resolved to %s", queue)
# Select runtime
if not self.setup_runtime:
# Autoselect
if self._call('which qsub', False)[0] == 0:
logger.debug("qsub is available, using runtime 'pbs'")
runtime = 'pbs'
else:
logger.debug("qsub not found, using runtime 'default'")
runtime = 'default'
else:
runtime = self.setup_runtime
if self.need_runtime is not None and runtime not in self.need_runtime:
raise ValueError("About to setup runtime %s but that wouldn't "
"match explicitely allowed runtimes" % runtime)
logger.info("Installing runtime %s%s at %s",
runtime,
"" if self.setup_runtime else " (auto)",
self.queue)
# Uploads runtime
scp_client = self.get_scp_client()
filename = pkg_resources.resource_filename('tej',
'remotes/%s' % runtime)
scp_client.put(filename, str(queue), recursive=True)
logger.debug("Files uploaded")
# Runs post-setup script
self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
logger.debug("Post-setup script done")
self._queue = queue
return queue
def submit(self, job_id, directory, script=None):
"""Submits a job to the queue.
If the runtime is not there, it will be installed. If it is a broken
chain of links, error.
"""
if job_id is None:
job_id = '%s_%s_%s' % (Path(directory).unicodename,
self.destination['username'],
make_unique_name())
else:
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
queue = self._setup()
if script is None:
script = 'start.sh'
# Create directory
ret, target = self._call('%s %s' % (
shell_escape(queue / 'commands/new_job'),
job_id),
True)
if ret == 4:
raise JobAlreadyExists
elif ret != 0:
raise JobNotFound("Couldn't create job")
target = PosixPath(target)
logger.debug("Server created directory %s", target)
# Upload to directory
try:
scp_client = self.get_scp_client()
scp_client.put(str(Path(directory)),
str(target),
recursive=True)
except BaseException as e:
try:
self.delete(job_id)
except BaseException:
raise e
raise
logger.debug("Files uploaded")
# Submit job
self.check_call('%s %s %s %s' % (
shell_escape(queue / 'commands/submit'),
job_id, shell_escape(target),
shell_escape(script)))
logger.info("Submitted job %s", job_id)
return job_id
def status(self, job_id):
"""Gets the status of a previously-submitted job.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/status'),
job_id),
True)
if ret == 0:
directory, result = output.splitlines()
result = result.decode('utf-8')
return RemoteQueue.JOB_DONE, PosixPath(directory), result
elif ret == 2:
directory = output.splitlines()[0]
return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
elif ret == 3:
raise JobNotFound
else:
raise RemoteCommandFailure(command="commands/status",
ret=ret)
def download(self, job_id, files, **kwargs):
"""Downloads files from server.
"""
check_jobid(job_id)
if not files:
return
if isinstance(files, string_types):
files = [files]
directory = False
recursive = kwargs.pop('recursive', True)
if 'destination' in kwargs and 'directory' in kwargs:
raise TypeError("Only use one of 'destination' or 'directory'")
elif 'destination' in kwargs:
destination = Path(kwargs.pop('destination'))
if len(files) != 1:
raise ValueError("'destination' specified but multiple files "
"given; did you mean to use 'directory'?")
elif 'directory' in kwargs:
destination = Path(kwargs.pop('directory'))
directory = True
if kwargs:
raise TypeError("Got unexpected keyword arguments")
# Might raise JobNotFound
status, target, result = self.status(job_id)
scp_client = self.get_scp_client()
for filename in files:
logger.info("Downloading %s", target / filename)
if directory:
scp_client.get(str(target / filename),
str(destination / filename),
recursive=recursive)
else:
scp_client.get(str(target / filename),
str(destination),
recursive=recursive)
def kill(self, job_id):
"""Kills a job on the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/kill'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret != 0:
raise RemoteCommandFailure(command='commands/kill',
ret=ret)
def delete(self, job_id):
"""Deletes a job from the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/delete'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret == 2:
raise JobStillRunning
elif ret != 0:
raise RemoteCommandFailure(command='commands/delete',
ret=ret)
def list(self):
"""Lists the jobs on the server.
"""
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
output = self.check_output('%s' %
shell_escape(queue / 'commands/list'))
job_id, info = None, None
for line in output.splitlines():
line = line.decode('utf-8')
if line.startswith(' '):
key, value = line[4:].split(': ', 1)
info[key] = value
else:
if job_id is not None:
yield job_id, info
job_id = line
info = {}
if job_id is not None:
yield job_id, info
def cleanup(self, kill=False):
queue = self._get_queue()
if queue is not None:
# Kill jobs
for job_id, info in self.list():
if info['status'] == 'running':
if not kill:
raise JobStillRunning("Can't cleanup, some jobs are "
"still running")
else:
logger.info("Killing running job %s", job_id)
self.kill(job_id)
# Remove queue
logger.info("Removing queue at %s", queue)
self.check_call('rm -rf -- %s' % shell_escape(queue))
# Remove links
for link in self._links:
self.check_call('rm -rf -- %s' % shell_escape(link))
return True
|
VisTrails/tej | tej/submission.py | parse_ssh_destination | python | def parse_ssh_destination(destination):
match = _re_ssh.match(destination)
if not match:
raise InvalidDestination("Invalid destination: %s" % destination)
user, password, host, port = match.groups()
info = {}
if user:
info['username'] = user
else:
info['username'] = getpass.getuser()
if password:
info['password'] = password
if port:
info['port'] = int(port)
info['hostname'] = host
return info | Parses the SSH destination argument. | train | https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L75-L93 | null | from __future__ import absolute_import, division, unicode_literals
import getpass
import logging
import paramiko
import pkg_resources
import random
import re
from rpaths import PosixPath, Path
import scp
import select
import socket
from tej.errors import InvalidDestination, QueueDoesntExist, \
QueueLinkBroken, QueueExists, JobAlreadyExists, JobNotFound, \
JobStillRunning, RemoteCommandFailure
from tej.utils import unicode_, string_types, iteritems, irange, shell_escape
__all__ = ['DEFAULT_TEJ_DIR',
'parse_ssh_destination', 'destination_as_string',
'ServerLogger', 'RemoteQueue']
DEFAULT_TEJ_DIR = '~/.tej'
logger = logging.getLogger('tej')
def _unique_names():
"""Generates unique sequences of bytes.
"""
characters = ("abcdefghijklmnopqrstuvwxyz"
"0123456789")
characters = [characters[i:i + 1] for i in irange(len(characters))]
rng = random.Random()
while True:
letters = [rng.choice(characters) for i in irange(10)]
yield ''.join(letters)
_unique_names = _unique_names()
def make_unique_name():
"""Makes a unique (random) string.
"""
return next(_unique_names)
def escape_queue(s):
"""Escapes the path to a queue, e.g. preserves ~ at the begining.
"""
if isinstance(s, PosixPath):
s = unicode_(s)
elif isinstance(s, bytes):
s = s.decode('utf-8')
if s.startswith('~/'):
return '~/' + shell_escape(s[2:])
else:
return shell_escape(s)
_re_ssh = re.compile(r'^'
r'(?:ssh://)?' # 'ssh://' prefix
r'(?:([a-zA-Z0-9_.-]+)' # 'user@'
r'(?::([^ @]+))?' # ':password'
r'@)?' # '@'
r'([a-zA-Z0-9_.-]+)' # 'host'
r'(?::([0-9]+))?' # ':port'
r'$')
def destination_as_string(destination):
if 'password' in destination:
user = '%s:%s' % (destination['username'], destination['password'])
else:
user = destination['username']
if destination.get('port', 22) != 22:
return 'ssh://%s@%s:%d' % (user,
destination['hostname'],
destination['port'])
else:
return 'ssh://%s@%s' % (user,
destination['hostname'])
class ServerLogger(object):
"""Adapter getting bytes from the server's output and handing them to log.
"""
logger = logging.getLogger('tej.server')
def __init__(self):
self.data = []
def append(self, data):
self.data.append(data)
def done(self):
if self.data:
data = b''.join(self.data)
data = data.decode('utf-8', 'replace')
data = data.rstrip()
self.message(data)
self.data = []
def message(self, data):
self.logger.info(data)
JOB_ID_CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" \
"abcdefghijklmnopqrstuvwxyz" \
"0123456789_-+=@%:.,"
def check_jobid(job_id):
if not all(c in JOB_ID_CHARS for c in job_id):
raise ValueError("Invalid job identifier")
class RemoteQueue(object):
JOB_DONE = 'finished'
JOB_RUNNING = 'running'
JOB_INCOMPLETE = 'incomplete'
JOB_CREATED = 'created'
PROTOCOL_VERSION = 0, 2
def __init__(self, destination, queue,
setup_runtime=None, need_runtime=None):
"""Creates a queue object, that represents a job queue on a server.
:param destination: The address of the server, used to SSH into it.
:param queue: The pathname of the queue on the remote server. Something
like "~/.tej" is usually adequate. This will contain both the job info
and files, and the scripts used to manage it on the server side.
:param setup_runtime: The name of the runtime to deploy on the server
if the queue doesn't already exist. If None (default), it will
auto-detect what is appropriate (currently, `pbs` if the ``qsub``
command is available), and fallback on `default`. If `need_runtime` is
set, this should be one of the accepted values.
:param need_runtime: A list of runtime names that are acceptable. If
the queue already exists on the server and this argument is not None,
the installed runtime will be matched against it, and a failure will be
reported if it is not one of the provided values.
"""
if isinstance(destination, string_types):
self.destination = parse_ssh_destination(destination)
else:
if 'hostname' not in destination:
raise InvalidDestination("destination dictionary is missing "
"hostname")
self.destination = destination
if setup_runtime not in (None, 'default', 'pbs'):
raise ValueError("Selected runtime %r is unknown" % setup_runtime)
self.setup_runtime = setup_runtime
if need_runtime is not None:
self.need_runtime = set(need_runtime)
else:
self.need_runtime = None
self.queue = PosixPath(queue)
self._queue = None
self._ssh = None
self._connect()
def server_logger(self):
"""Handles messages from the server.
By default, uses getLogger('tej.server').warning(). Override this in
subclasses to provide your own mechanism.
"""
return ServerLogger()
@property
def destination_string(self):
return destination_as_string(self.destination)
def _ssh_client(self):
"""Gets an SSH client to connect with.
"""
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
return ssh
def _connect(self):
"""Connects via SSH.
"""
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
"""
if self._ssh is None:
self._connect()
return self._ssh
else:
try:
chan = self._ssh.get_transport().open_session()
except (socket.error, paramiko.SSHException):
logger.warning("Lost connection, reconnecting...")
self._ssh.close()
self._connect()
else:
chan.close()
return self._ssh
def get_scp_client(self):
return scp.SCPClient(self.get_client().get_transport())
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close()
def check_call(self, cmd):
"""Calls a command through SSH.
"""
ret, _ = self._call(cmd, False)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
"""
if depth == 0:
logger.debug("resolve_queue(%s)", queue)
answer = self.check_output(
'if [ -d %(queue)s ]; then '
' cd %(queue)s; echo "dir"; cat version; pwd; '
'elif [ -f %(queue)s ]; then '
' cat %(queue)s; '
'else '
' echo no; '
'fi' % {
'queue': escape_queue(queue)})
if answer == b'no':
if depth > 0:
logger.debug("Broken link at depth=%d", depth)
else:
logger.debug("Path doesn't exist")
return None, depth
elif answer.startswith(b'dir\n'):
version, runtime, path = answer[4:].split(b'\n', 2)
try:
version = tuple(int(e)
for e in version.decode('ascii', 'ignore')
.split('.'))
except ValueError:
version = 0, 0
if version[:2] != self.PROTOCOL_VERSION:
raise QueueExists(
msg="Queue exists and is using incompatible protocol "
"version %s" % '.'.join('%s' % e for e in version))
path = PosixPath(path)
runtime = runtime.decode('ascii', 'replace')
if self.need_runtime is not None:
if (self.need_runtime is not None and
runtime not in self.need_runtime):
raise QueueExists(
msg="Queue exists and is using explicitely disallowed "
"runtime %s" % runtime)
logger.debug("Found directory at %s, depth=%d, runtime=%s",
path, depth, runtime)
return path, depth
elif answer.startswith(b'tejdir: '):
new = queue.parent / answer[8:]
logger.debug("Found link to %s, recursing", new)
if links is not None:
links.append(queue)
return self._resolve_queue(new, depth + 1)
else: # pragma: no cover
logger.debug("Server returned %r", answer)
raise RemoteCommandFailure(msg="Queue resolution command failed "
"in unexpected way")
def _get_queue(self):
"""Gets the actual location of the queue, or None.
"""
if self._queue is None:
self._links = []
queue, depth = self._resolve_queue(self.queue, links=self._links)
if queue is None and depth > 0:
raise QueueLinkBroken
self._queue = queue
return self._queue
def setup(self, links=None, force=False, only_links=False):
"""Installs the runtime at the target location.
This will not replace an existing installation, unless `force` is True.
After installation, creates links to this installation at the specified
locations.
"""
if not links:
links = []
if only_links:
logger.info("Only creating links")
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(self.queue),
'link': escape_queue(link)})
return
queue, depth = self._resolve_queue(self.queue)
if queue is not None or depth > 0:
if force:
if queue is None:
logger.info("Replacing broken link")
elif depth > 0:
logger.info("Replacing link to %s...", queue)
else:
logger.info("Replacing existing queue...")
self.check_call('rm -Rf %s' % escape_queue(self.queue))
else:
if queue is not None and depth > 0:
raise QueueExists("Queue already exists (links to %s)\n"
"Use --force to replace" % queue)
elif depth > 0:
raise QueueExists("Broken link exists\n"
"Use --force to replace")
else:
raise QueueExists("Queue already exists\n"
"Use --force to replace")
queue = self._setup()
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(queue),
'link': escape_queue(link)})
def _setup(self):
"""Actually installs the runtime.
"""
# Expands ~user in queue
if self.queue.path[0:1] == b'/':
queue = self.queue
else:
if self.queue.path[0:1] == b'~':
output = self.check_output('echo %s' %
escape_queue(self.queue))
queue = PosixPath(output.rstrip(b'\r\n'))
else:
output = self.check_output('pwd')
queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
logger.debug("Resolved to %s", queue)
# Select runtime
if not self.setup_runtime:
# Autoselect
if self._call('which qsub', False)[0] == 0:
logger.debug("qsub is available, using runtime 'pbs'")
runtime = 'pbs'
else:
logger.debug("qsub not found, using runtime 'default'")
runtime = 'default'
else:
runtime = self.setup_runtime
if self.need_runtime is not None and runtime not in self.need_runtime:
raise ValueError("About to setup runtime %s but that wouldn't "
"match explicitely allowed runtimes" % runtime)
logger.info("Installing runtime %s%s at %s",
runtime,
"" if self.setup_runtime else " (auto)",
self.queue)
# Uploads runtime
scp_client = self.get_scp_client()
filename = pkg_resources.resource_filename('tej',
'remotes/%s' % runtime)
scp_client.put(filename, str(queue), recursive=True)
logger.debug("Files uploaded")
# Runs post-setup script
self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
logger.debug("Post-setup script done")
self._queue = queue
return queue
def submit(self, job_id, directory, script=None):
"""Submits a job to the queue.
If the runtime is not there, it will be installed. If it is a broken
chain of links, error.
"""
if job_id is None:
job_id = '%s_%s_%s' % (Path(directory).unicodename,
self.destination['username'],
make_unique_name())
else:
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
queue = self._setup()
if script is None:
script = 'start.sh'
# Create directory
ret, target = self._call('%s %s' % (
shell_escape(queue / 'commands/new_job'),
job_id),
True)
if ret == 4:
raise JobAlreadyExists
elif ret != 0:
raise JobNotFound("Couldn't create job")
target = PosixPath(target)
logger.debug("Server created directory %s", target)
# Upload to directory
try:
scp_client = self.get_scp_client()
scp_client.put(str(Path(directory)),
str(target),
recursive=True)
except BaseException as e:
try:
self.delete(job_id)
except BaseException:
raise e
raise
logger.debug("Files uploaded")
# Submit job
self.check_call('%s %s %s %s' % (
shell_escape(queue / 'commands/submit'),
job_id, shell_escape(target),
shell_escape(script)))
logger.info("Submitted job %s", job_id)
return job_id
def status(self, job_id):
"""Gets the status of a previously-submitted job.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/status'),
job_id),
True)
if ret == 0:
directory, result = output.splitlines()
result = result.decode('utf-8')
return RemoteQueue.JOB_DONE, PosixPath(directory), result
elif ret == 2:
directory = output.splitlines()[0]
return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
elif ret == 3:
raise JobNotFound
else:
raise RemoteCommandFailure(command="commands/status",
ret=ret)
def download(self, job_id, files, **kwargs):
"""Downloads files from server.
"""
check_jobid(job_id)
if not files:
return
if isinstance(files, string_types):
files = [files]
directory = False
recursive = kwargs.pop('recursive', True)
if 'destination' in kwargs and 'directory' in kwargs:
raise TypeError("Only use one of 'destination' or 'directory'")
elif 'destination' in kwargs:
destination = Path(kwargs.pop('destination'))
if len(files) != 1:
raise ValueError("'destination' specified but multiple files "
"given; did you mean to use 'directory'?")
elif 'directory' in kwargs:
destination = Path(kwargs.pop('directory'))
directory = True
if kwargs:
raise TypeError("Got unexpected keyword arguments")
# Might raise JobNotFound
status, target, result = self.status(job_id)
scp_client = self.get_scp_client()
for filename in files:
logger.info("Downloading %s", target / filename)
if directory:
scp_client.get(str(target / filename),
str(destination / filename),
recursive=recursive)
else:
scp_client.get(str(target / filename),
str(destination),
recursive=recursive)
def kill(self, job_id):
"""Kills a job on the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/kill'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret != 0:
raise RemoteCommandFailure(command='commands/kill',
ret=ret)
def delete(self, job_id):
"""Deletes a job from the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/delete'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret == 2:
raise JobStillRunning
elif ret != 0:
raise RemoteCommandFailure(command='commands/delete',
ret=ret)
def list(self):
"""Lists the jobs on the server.
"""
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
output = self.check_output('%s' %
shell_escape(queue / 'commands/list'))
job_id, info = None, None
for line in output.splitlines():
line = line.decode('utf-8')
if line.startswith(' '):
key, value = line[4:].split(': ', 1)
info[key] = value
else:
if job_id is not None:
yield job_id, info
job_id = line
info = {}
if job_id is not None:
yield job_id, info
def cleanup(self, kill=False):
queue = self._get_queue()
if queue is not None:
# Kill jobs
for job_id, info in self.list():
if info['status'] == 'running':
if not kill:
raise JobStillRunning("Can't cleanup, some jobs are "
"still running")
else:
logger.info("Killing running job %s", job_id)
self.kill(job_id)
# Remove queue
logger.info("Removing queue at %s", queue)
self.check_call('rm -rf -- %s' % shell_escape(queue))
# Remove links
for link in self._links:
self.check_call('rm -rf -- %s' % shell_escape(link))
return True
|
VisTrails/tej | tej/submission.py | RemoteQueue._ssh_client | python | def _ssh_client(self):
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
return ssh | Gets an SSH client to connect with. | train | https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L201-L207 | null | class RemoteQueue(object):
JOB_DONE = 'finished'
JOB_RUNNING = 'running'
JOB_INCOMPLETE = 'incomplete'
JOB_CREATED = 'created'
PROTOCOL_VERSION = 0, 2
def __init__(self, destination, queue,
setup_runtime=None, need_runtime=None):
"""Creates a queue object, that represents a job queue on a server.
:param destination: The address of the server, used to SSH into it.
:param queue: The pathname of the queue on the remote server. Something
like "~/.tej" is usually adequate. This will contain both the job info
and files, and the scripts used to manage it on the server side.
:param setup_runtime: The name of the runtime to deploy on the server
if the queue doesn't already exist. If None (default), it will
auto-detect what is appropriate (currently, `pbs` if the ``qsub``
command is available), and fallback on `default`. If `need_runtime` is
set, this should be one of the accepted values.
:param need_runtime: A list of runtime names that are acceptable. If
the queue already exists on the server and this argument is not None,
the installed runtime will be matched against it, and a failure will be
reported if it is not one of the provided values.
"""
if isinstance(destination, string_types):
self.destination = parse_ssh_destination(destination)
else:
if 'hostname' not in destination:
raise InvalidDestination("destination dictionary is missing "
"hostname")
self.destination = destination
if setup_runtime not in (None, 'default', 'pbs'):
raise ValueError("Selected runtime %r is unknown" % setup_runtime)
self.setup_runtime = setup_runtime
if need_runtime is not None:
self.need_runtime = set(need_runtime)
else:
self.need_runtime = None
self.queue = PosixPath(queue)
self._queue = None
self._ssh = None
self._connect()
def server_logger(self):
"""Handles messages from the server.
By default, uses getLogger('tej.server').warning(). Override this in
subclasses to provide your own mechanism.
"""
return ServerLogger()
@property
def destination_string(self):
return destination_as_string(self.destination)
def _connect(self):
"""Connects via SSH.
"""
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
"""
if self._ssh is None:
self._connect()
return self._ssh
else:
try:
chan = self._ssh.get_transport().open_session()
except (socket.error, paramiko.SSHException):
logger.warning("Lost connection, reconnecting...")
self._ssh.close()
self._connect()
else:
chan.close()
return self._ssh
def get_scp_client(self):
return scp.SCPClient(self.get_client().get_transport())
def _call(self, cmd, get_output):
"""Calls a command through the SSH connection.
Remote stderr gets printed to this program's stderr. Output is captured
and may be returned.
"""
server_err = self.server_logger()
chan = self.get_client().get_transport().open_session()
try:
logger.debug("Invoking %r%s",
cmd, " (stdout)" if get_output else "")
chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
output = b''
while True:
r, w, e = select.select([chan], [], [])
if chan not in r:
continue # pragma: no cover
recvd = False
while chan.recv_stderr_ready():
data = chan.recv_stderr(1024)
server_err.append(data)
recvd = True
while chan.recv_ready():
data = chan.recv(1024)
if get_output:
output += data
recvd = True
if not recvd and chan.exit_status_ready():
break
output = output.rstrip(b'\r\n')
return chan.recv_exit_status(), output
finally:
server_err.done()
chan.close()
def check_call(self, cmd):
"""Calls a command through SSH.
"""
ret, _ = self._call(cmd, False)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
"""
if depth == 0:
logger.debug("resolve_queue(%s)", queue)
answer = self.check_output(
'if [ -d %(queue)s ]; then '
' cd %(queue)s; echo "dir"; cat version; pwd; '
'elif [ -f %(queue)s ]; then '
' cat %(queue)s; '
'else '
' echo no; '
'fi' % {
'queue': escape_queue(queue)})
if answer == b'no':
if depth > 0:
logger.debug("Broken link at depth=%d", depth)
else:
logger.debug("Path doesn't exist")
return None, depth
elif answer.startswith(b'dir\n'):
version, runtime, path = answer[4:].split(b'\n', 2)
try:
version = tuple(int(e)
for e in version.decode('ascii', 'ignore')
.split('.'))
except ValueError:
version = 0, 0
if version[:2] != self.PROTOCOL_VERSION:
raise QueueExists(
msg="Queue exists and is using incompatible protocol "
"version %s" % '.'.join('%s' % e for e in version))
path = PosixPath(path)
runtime = runtime.decode('ascii', 'replace')
if self.need_runtime is not None:
if (self.need_runtime is not None and
runtime not in self.need_runtime):
raise QueueExists(
msg="Queue exists and is using explicitely disallowed "
"runtime %s" % runtime)
logger.debug("Found directory at %s, depth=%d, runtime=%s",
path, depth, runtime)
return path, depth
elif answer.startswith(b'tejdir: '):
new = queue.parent / answer[8:]
logger.debug("Found link to %s, recursing", new)
if links is not None:
links.append(queue)
return self._resolve_queue(new, depth + 1)
else: # pragma: no cover
logger.debug("Server returned %r", answer)
raise RemoteCommandFailure(msg="Queue resolution command failed "
"in unexpected way")
def _get_queue(self):
"""Gets the actual location of the queue, or None.
"""
if self._queue is None:
self._links = []
queue, depth = self._resolve_queue(self.queue, links=self._links)
if queue is None and depth > 0:
raise QueueLinkBroken
self._queue = queue
return self._queue
def setup(self, links=None, force=False, only_links=False):
"""Installs the runtime at the target location.
This will not replace an existing installation, unless `force` is True.
After installation, creates links to this installation at the specified
locations.
"""
if not links:
links = []
if only_links:
logger.info("Only creating links")
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(self.queue),
'link': escape_queue(link)})
return
queue, depth = self._resolve_queue(self.queue)
if queue is not None or depth > 0:
if force:
if queue is None:
logger.info("Replacing broken link")
elif depth > 0:
logger.info("Replacing link to %s...", queue)
else:
logger.info("Replacing existing queue...")
self.check_call('rm -Rf %s' % escape_queue(self.queue))
else:
if queue is not None and depth > 0:
raise QueueExists("Queue already exists (links to %s)\n"
"Use --force to replace" % queue)
elif depth > 0:
raise QueueExists("Broken link exists\n"
"Use --force to replace")
else:
raise QueueExists("Queue already exists\n"
"Use --force to replace")
queue = self._setup()
for link in links:
self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
'queue': escape_queue(queue),
'link': escape_queue(link)})
def _setup(self):
"""Actually installs the runtime.
"""
# Expands ~user in queue
if self.queue.path[0:1] == b'/':
queue = self.queue
else:
if self.queue.path[0:1] == b'~':
output = self.check_output('echo %s' %
escape_queue(self.queue))
queue = PosixPath(output.rstrip(b'\r\n'))
else:
output = self.check_output('pwd')
queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
logger.debug("Resolved to %s", queue)
# Select runtime
if not self.setup_runtime:
# Autoselect
if self._call('which qsub', False)[0] == 0:
logger.debug("qsub is available, using runtime 'pbs'")
runtime = 'pbs'
else:
logger.debug("qsub not found, using runtime 'default'")
runtime = 'default'
else:
runtime = self.setup_runtime
if self.need_runtime is not None and runtime not in self.need_runtime:
raise ValueError("About to setup runtime %s but that wouldn't "
"match explicitely allowed runtimes" % runtime)
logger.info("Installing runtime %s%s at %s",
runtime,
"" if self.setup_runtime else " (auto)",
self.queue)
# Uploads runtime
scp_client = self.get_scp_client()
filename = pkg_resources.resource_filename('tej',
'remotes/%s' % runtime)
scp_client.put(filename, str(queue), recursive=True)
logger.debug("Files uploaded")
# Runs post-setup script
self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
logger.debug("Post-setup script done")
self._queue = queue
return queue
def submit(self, job_id, directory, script=None):
"""Submits a job to the queue.
If the runtime is not there, it will be installed. If it is a broken
chain of links, error.
"""
if job_id is None:
job_id = '%s_%s_%s' % (Path(directory).unicodename,
self.destination['username'],
make_unique_name())
else:
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
queue = self._setup()
if script is None:
script = 'start.sh'
# Create directory
ret, target = self._call('%s %s' % (
shell_escape(queue / 'commands/new_job'),
job_id),
True)
if ret == 4:
raise JobAlreadyExists
elif ret != 0:
raise JobNotFound("Couldn't create job")
target = PosixPath(target)
logger.debug("Server created directory %s", target)
# Upload to directory
try:
scp_client = self.get_scp_client()
scp_client.put(str(Path(directory)),
str(target),
recursive=True)
except BaseException as e:
try:
self.delete(job_id)
except BaseException:
raise e
raise
logger.debug("Files uploaded")
# Submit job
self.check_call('%s %s %s %s' % (
shell_escape(queue / 'commands/submit'),
job_id, shell_escape(target),
shell_escape(script)))
logger.info("Submitted job %s", job_id)
return job_id
def status(self, job_id):
"""Gets the status of a previously-submitted job.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/status'),
job_id),
True)
if ret == 0:
directory, result = output.splitlines()
result = result.decode('utf-8')
return RemoteQueue.JOB_DONE, PosixPath(directory), result
elif ret == 2:
directory = output.splitlines()[0]
return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
elif ret == 3:
raise JobNotFound
else:
raise RemoteCommandFailure(command="commands/status",
ret=ret)
def download(self, job_id, files, **kwargs):
"""Downloads files from server.
"""
check_jobid(job_id)
if not files:
return
if isinstance(files, string_types):
files = [files]
directory = False
recursive = kwargs.pop('recursive', True)
if 'destination' in kwargs and 'directory' in kwargs:
raise TypeError("Only use one of 'destination' or 'directory'")
elif 'destination' in kwargs:
destination = Path(kwargs.pop('destination'))
if len(files) != 1:
raise ValueError("'destination' specified but multiple files "
"given; did you mean to use 'directory'?")
elif 'directory' in kwargs:
destination = Path(kwargs.pop('directory'))
directory = True
if kwargs:
raise TypeError("Got unexpected keyword arguments")
# Might raise JobNotFound
status, target, result = self.status(job_id)
scp_client = self.get_scp_client()
for filename in files:
logger.info("Downloading %s", target / filename)
if directory:
scp_client.get(str(target / filename),
str(destination / filename),
recursive=recursive)
else:
scp_client.get(str(target / filename),
str(destination),
recursive=recursive)
def kill(self, job_id):
"""Kills a job on the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/kill'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret != 0:
raise RemoteCommandFailure(command='commands/kill',
ret=ret)
def delete(self, job_id):
"""Deletes a job from the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/delete'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret == 2:
raise JobStillRunning
elif ret != 0:
raise RemoteCommandFailure(command='commands/delete',
ret=ret)
def list(self):
"""Lists the jobs on the server.
"""
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
output = self.check_output('%s' %
shell_escape(queue / 'commands/list'))
job_id, info = None, None
for line in output.splitlines():
line = line.decode('utf-8')
if line.startswith(' '):
key, value = line[4:].split(': ', 1)
info[key] = value
else:
if job_id is not None:
yield job_id, info
job_id = line
info = {}
if job_id is not None:
yield job_id, info
    def cleanup(self, kill=False):
        """Removes the queue directory and the links pointing at it.

        :param kill: If True, kill running jobs first; otherwise a running
            job aborts the cleanup with JobStillRunning.
        :return: True.
        """
        queue = self._get_queue()
        if queue is not None:
            # Kill jobs
            for job_id, info in self.list():
                if info['status'] == 'running':
                    if not kill:
                        raise JobStillRunning("Can't cleanup, some jobs are "
                                              "still running")
                    else:
                        logger.info("Killing running job %s", job_id)
                        self.kill(job_id)

            # Remove queue
            logger.info("Removing queue at %s", queue)
            self.check_call('rm -rf -- %s' % shell_escape(queue))

            # Remove links (recorded by _get_queue() while resolving)
            for link in self._links:
                self.check_call('rm -rf -- %s' % shell_escape(link))

        return True
|
VisTrails/tej | tej/submission.py | RemoteQueue._connect | python | def _connect(self):
ssh = self._ssh_client()
logger.debug("Connecting with %s",
', '.join('%s=%r' % (k, v if k != "password" else "***")
for k, v in iteritems(self.destination)))
ssh.connect(**self.destination)
logger.debug("Connected to %s", self.destination['hostname'])
self._ssh = ssh | Connects via SSH. | train | https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L209-L218 | [
"def _ssh_client(self):\n \"\"\"Gets an SSH client to connect with.\n \"\"\"\n ssh = paramiko.SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.RejectPolicy())\n return ssh\n"
] | class RemoteQueue(object):
JOB_DONE = 'finished'
JOB_RUNNING = 'running'
JOB_INCOMPLETE = 'incomplete'
JOB_CREATED = 'created'
PROTOCOL_VERSION = 0, 2
    def __init__(self, destination, queue,
                 setup_runtime=None, need_runtime=None):
        """Creates a queue object, that represents a job queue on a server.

        :param destination: The address of the server, used to SSH into it.
            Either an SSH destination string or a dict of paramiko connect
            keyword arguments (must contain 'hostname').

        :param queue: The pathname of the queue on the remote server.
        Something like "~/.tej" is usually adequate. This will contain both
        the job info and files, and the scripts used to manage it on the
        server side.

        :param setup_runtime: The name of the runtime to deploy on the server
        if the queue doesn't already exist. If None (default), it will
        auto-detect what is appropriate (currently, `pbs` if the ``qsub``
        command is available), and fallback on `default`. If `need_runtime` is
        set, this should be one of the accepted values.

        :param need_runtime: A list of runtime names that are acceptable. If
        the queue already exists on the server and this argument is not None,
        the installed runtime will be matched against it, and a failure will
        be reported if it is not one of the provided values.
        """
        if isinstance(destination, string_types):
            # Parse "user@host:port"-style strings into a dict
            self.destination = parse_ssh_destination(destination)
        else:
            if 'hostname' not in destination:
                raise InvalidDestination("destination dictionary is missing "
                                         "hostname")
            self.destination = destination
        if setup_runtime not in (None, 'default', 'pbs'):
            raise ValueError("Selected runtime %r is unknown" % setup_runtime)
        self.setup_runtime = setup_runtime
        if need_runtime is not None:
            # Stored as a set for fast membership tests in _resolve_queue()
            self.need_runtime = set(need_runtime)
        else:
            self.need_runtime = None
        self.queue = PosixPath(queue)
        # _queue caches the resolved queue location; _ssh the live connection
        self._queue = None
        self._ssh = None
        # Connect eagerly so configuration errors surface immediately
        self._connect()
def server_logger(self):
"""Handles messages from the server.
By default, uses getLogger('tej.server').warning(). Override this in
subclasses to provide your own mechanism.
"""
return ServerLogger()
    @property
    def destination_string(self):
        # Formats self.destination via the module helper; presumably a
        # "user@host" style string -- confirm against destination_as_string
        return destination_as_string(self.destination)
def _ssh_client(self):
"""Gets an SSH client to connect with.
"""
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.RejectPolicy())
return ssh
def get_client(self):
"""Gets the SSH client.
This will check that the connection is still alive first, and reconnect
if necessary.
"""
if self._ssh is None:
self._connect()
return self._ssh
else:
try:
chan = self._ssh.get_transport().open_session()
except (socket.error, paramiko.SSHException):
logger.warning("Lost connection, reconnecting...")
self._ssh.close()
self._connect()
else:
chan.close()
return self._ssh
def get_scp_client(self):
return scp.SCPClient(self.get_client().get_transport())
    def _call(self, cmd, get_output):
        """Calls a command through the SSH connection.

        Remote stderr gets printed to this program's stderr. Output is
        captured and may be returned.

        :param cmd: Shell command to run on the server (wrapped in
            ``/bin/sh -c``).
        :param get_output: If True, accumulate and return remote stdout.
        :return: Tuple of (exit status, captured stdout as bytes, stripped of
            trailing newlines; empty if get_output is False).
        """
        server_err = self.server_logger()

        chan = self.get_client().get_transport().open_session()
        try:
            logger.debug("Invoking %r%s",
                         cmd, " (stdout)" if get_output else "")
            chan.exec_command('/bin/sh -c %s' % shell_escape(cmd))
            output = b''
            while True:
                # Wait until the channel has something for us
                r, w, e = select.select([chan], [], [])
                if chan not in r:
                    continue  # pragma: no cover
                recvd = False
                # Drain stderr first, forwarding it to the server logger
                while chan.recv_stderr_ready():
                    data = chan.recv_stderr(1024)
                    server_err.append(data)
                    recvd = True
                while chan.recv_ready():
                    data = chan.recv(1024)
                    if get_output:
                        output += data
                    recvd = True
                # Only stop once both streams are drained AND the process has
                # exited; checking exit status alone could lose buffered data
                if not recvd and chan.exit_status_ready():
                    break
            output = output.rstrip(b'\r\n')
            return chan.recv_exit_status(), output
        finally:
            server_err.done()
            chan.close()
def check_call(self, cmd):
"""Calls a command through SSH.
"""
ret, _ = self._call(cmd, False)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def _resolve_queue(self, queue, depth=0, links=None):
"""Finds the location of tej's queue directory on the server.
The `queue` set when constructing this `RemoteQueue` might be relative
to the home directory and might contain ``~user`` placeholders. Also,
each queue may in fact be a link to another path (a file containing
the string ``tejdir:``, a space, and a new pathname, relative to this
link's location).
"""
if depth == 0:
logger.debug("resolve_queue(%s)", queue)
answer = self.check_output(
'if [ -d %(queue)s ]; then '
' cd %(queue)s; echo "dir"; cat version; pwd; '
'elif [ -f %(queue)s ]; then '
' cat %(queue)s; '
'else '
' echo no; '
'fi' % {
'queue': escape_queue(queue)})
if answer == b'no':
if depth > 0:
logger.debug("Broken link at depth=%d", depth)
else:
logger.debug("Path doesn't exist")
return None, depth
elif answer.startswith(b'dir\n'):
version, runtime, path = answer[4:].split(b'\n', 2)
try:
version = tuple(int(e)
for e in version.decode('ascii', 'ignore')
.split('.'))
except ValueError:
version = 0, 0
if version[:2] != self.PROTOCOL_VERSION:
raise QueueExists(
msg="Queue exists and is using incompatible protocol "
"version %s" % '.'.join('%s' % e for e in version))
path = PosixPath(path)
runtime = runtime.decode('ascii', 'replace')
if self.need_runtime is not None:
if (self.need_runtime is not None and
runtime not in self.need_runtime):
raise QueueExists(
msg="Queue exists and is using explicitely disallowed "
"runtime %s" % runtime)
logger.debug("Found directory at %s, depth=%d, runtime=%s",
path, depth, runtime)
return path, depth
elif answer.startswith(b'tejdir: '):
new = queue.parent / answer[8:]
logger.debug("Found link to %s, recursing", new)
if links is not None:
links.append(queue)
return self._resolve_queue(new, depth + 1)
else: # pragma: no cover
logger.debug("Server returned %r", answer)
raise RemoteCommandFailure(msg="Queue resolution command failed "
"in unexpected way")
def _get_queue(self):
"""Gets the actual location of the queue, or None.
"""
if self._queue is None:
self._links = []
queue, depth = self._resolve_queue(self.queue, links=self._links)
if queue is None and depth > 0:
raise QueueLinkBroken
self._queue = queue
return self._queue
    def setup(self, links=None, force=False, only_links=False):
        """Installs the runtime at the target location.

        This will not replace an existing installation, unless `force` is
        True.

        After installation, creates links to this installation at the
        specified locations.

        :param links: Additional remote paths to point at the queue.
        :param force: Replace an existing queue or broken link instead of
            raising QueueExists.
        :param only_links: Only (re)create the link files, assuming the
            queue itself already exists.
        """
        if not links:
            links = []

        if only_links:
            logger.info("Only creating links")
            for link in links:
                # A link is a file containing "tejdir:" and the target path
                self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
                    'queue': escape_queue(self.queue),
                    'link': escape_queue(link)})
            return

        queue, depth = self._resolve_queue(self.queue)
        # queue is the resolved path (None if missing/broken); depth > 0
        # means at least one link was followed
        if queue is not None or depth > 0:
            if force:
                if queue is None:
                    logger.info("Replacing broken link")
                elif depth > 0:
                    logger.info("Replacing link to %s...", queue)
                else:
                    logger.info("Replacing existing queue...")
                self.check_call('rm -Rf %s' % escape_queue(self.queue))
            else:
                # Report what exactly is in the way
                if queue is not None and depth > 0:
                    raise QueueExists("Queue already exists (links to %s)\n"
                                      "Use --force to replace" % queue)
                elif depth > 0:
                    raise QueueExists("Broken link exists\n"
                                      "Use --force to replace")
                else:
                    raise QueueExists("Queue already exists\n"
                                      "Use --force to replace")

        queue = self._setup()

        for link in links:
            self.check_call('echo "tejdir:" %(queue)s > %(link)s' % {
                'queue': escape_queue(queue),
                'link': escape_queue(link)})
    def _setup(self):
        """Actually installs the runtime.

        Expands the queue path to an absolute one, selects a runtime
        (auto-detecting via ``qsub`` if none was requested), uploads the
        runtime files and runs the remote post-setup script.

        :return: The absolute queue path on the server.
        """
        # Expands ~user in queue
        if self.queue.path[0:1] == b'/':
            # Already absolute
            queue = self.queue
        else:
            if self.queue.path[0:1] == b'~':
                # Let the remote shell expand ~ / ~user
                output = self.check_output('echo %s' %
                                           escape_queue(self.queue))
                queue = PosixPath(output.rstrip(b'\r\n'))
            else:
                # Relative path: anchor it at the remote working directory
                output = self.check_output('pwd')
                queue = PosixPath(output.rstrip(b'\r\n')) / self.queue
            logger.debug("Resolved to %s", queue)

        # Select runtime
        if not self.setup_runtime:
            # Autoselect: use the PBS runtime when qsub is available
            if self._call('which qsub', False)[0] == 0:
                logger.debug("qsub is available, using runtime 'pbs'")
                runtime = 'pbs'
            else:
                logger.debug("qsub not found, using runtime 'default'")
                runtime = 'default'
        else:
            runtime = self.setup_runtime

        if self.need_runtime is not None and runtime not in self.need_runtime:
            raise ValueError("About to setup runtime %s but that wouldn't "
                             "match explicitely allowed runtimes" % runtime)

        logger.info("Installing runtime %s%s at %s",
                    runtime,
                    "" if self.setup_runtime else " (auto)",
                    self.queue)

        # Uploads runtime
        scp_client = self.get_scp_client()
        filename = pkg_resources.resource_filename('tej',
                                                   'remotes/%s' % runtime)
        scp_client.put(filename, str(queue), recursive=True)
        logger.debug("Files uploaded")

        # Runs post-setup script
        self.check_call('/bin/sh %s' % shell_escape(queue / 'commands/setup'))
        logger.debug("Post-setup script done")

        self._queue = queue
        return queue
    def submit(self, job_id, directory, script=None):
        """Submits a job to the queue.

        If the runtime is not there, it will be installed. If it is a broken
        chain of links, error.

        :param job_id: Identifier for the new job, or None to generate one
            from the directory name, username and a unique suffix.
        :param directory: Local directory whose contents get uploaded as the
            job's files.
        :param script: Name of the script to run on the server; defaults to
            'start.sh'.
        :return: The job identifier.
        :raises JobAlreadyExists: if the server already has this job id.
        """
        if job_id is None:
            job_id = '%s_%s_%s' % (Path(directory).unicodename,
                                   self.destination['username'],
                                   make_unique_name())
        else:
            check_jobid(job_id)

        queue = self._get_queue()
        if queue is None:
            # No queue installed yet: set it up now
            queue = self._setup()

        if script is None:
            script = 'start.sh'

        # Create directory
        ret, target = self._call('%s %s' % (
                shell_escape(queue / 'commands/new_job'),
                job_id),
            True)
        if ret == 4:
            raise JobAlreadyExists
        elif ret != 0:
            raise JobNotFound("Couldn't create job")
        target = PosixPath(target)
        logger.debug("Server created directory %s", target)

        # Upload to directory
        try:
            scp_client = self.get_scp_client()
            scp_client.put(str(Path(directory)),
                           str(target),
                           recursive=True)
        except BaseException as e:
            # Upload failed: remove the half-created job, but don't let a
            # failure of the cleanup mask the original error
            try:
                self.delete(job_id)
            except BaseException:
                raise e
            raise
        logger.debug("Files uploaded")

        # Submit job
        self.check_call('%s %s %s %s' % (
            shell_escape(queue / 'commands/submit'),
            job_id, shell_escape(target),
            shell_escape(script)))
        logger.info("Submitted job %s", job_id)
        return job_id
    def status(self, job_id):
        """Gets the status of a previously-submitted job.

        :param job_id: Identifier of the job to query.
        :return: Tuple of (state, remote job directory, result string). The
            state is JOB_DONE (exit 0, result available) or JOB_RUNNING
            (exit 2, result is None).
        :raises JobNotFound: if the remote script exits with status 3.
        :raises RemoteCommandFailure: on any other nonzero exit status.
        """
        check_jobid(job_id)

        queue = self._get_queue()
        if queue is None:
            raise QueueDoesntExist

        ret, output = self._call('%s %s' % (
                shell_escape(queue / 'commands/status'),
                job_id),
            True)
        if ret == 0:
            # Finished: script prints the directory then the result
            directory, result = output.splitlines()
            result = result.decode('utf-8')
            return RemoteQueue.JOB_DONE, PosixPath(directory), result
        elif ret == 2:
            # Still running: only the directory is printed
            directory = output.splitlines()[0]
            return RemoteQueue.JOB_RUNNING, PosixPath(directory), None
        elif ret == 3:
            raise JobNotFound
        else:
            raise RemoteCommandFailure(command="commands/status",
                                       ret=ret)
def download(self, job_id, files, **kwargs):
"""Downloads files from server.
"""
check_jobid(job_id)
if not files:
return
if isinstance(files, string_types):
files = [files]
directory = False
recursive = kwargs.pop('recursive', True)
if 'destination' in kwargs and 'directory' in kwargs:
raise TypeError("Only use one of 'destination' or 'directory'")
elif 'destination' in kwargs:
destination = Path(kwargs.pop('destination'))
if len(files) != 1:
raise ValueError("'destination' specified but multiple files "
"given; did you mean to use 'directory'?")
elif 'directory' in kwargs:
destination = Path(kwargs.pop('directory'))
directory = True
if kwargs:
raise TypeError("Got unexpected keyword arguments")
# Might raise JobNotFound
status, target, result = self.status(job_id)
scp_client = self.get_scp_client()
for filename in files:
logger.info("Downloading %s", target / filename)
if directory:
scp_client.get(str(target / filename),
str(destination / filename),
recursive=recursive)
else:
scp_client.get(str(target / filename),
str(destination),
recursive=recursive)
def kill(self, job_id):
"""Kills a job on the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/kill'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret != 0:
raise RemoteCommandFailure(command='commands/kill',
ret=ret)
def delete(self, job_id):
"""Deletes a job from the server.
"""
check_jobid(job_id)
queue = self._get_queue()
if queue is None:
raise QueueDoesntExist
ret, output = self._call('%s %s' % (
shell_escape(queue / 'commands/delete'),
job_id),
False)
if ret == 3:
raise JobNotFound
elif ret == 2:
raise JobStillRunning
elif ret != 0:
raise RemoteCommandFailure(command='commands/delete',
ret=ret)
    def list(self):
        """Lists the jobs on the server.

        Generator of ``(job_id, info)`` pairs, where ``info`` is a dict of
        the detail lines the server printed for that job.

        The remote 'commands/list' script prints one unindented line per job
        (the job id) followed by indented "key: value" detail lines.
        :raises QueueDoesntExist: if no queue is installed on the server.
        """
        queue = self._get_queue()
        if queue is None:
            raise QueueDoesntExist
        output = self.check_output('%s' %
                                   shell_escape(queue / 'commands/list'))

        job_id, info = None, None
        for line in output.splitlines():
            line = line.decode('utf-8')
            # Indented lines are details of the current job; strip the
            # 4-character indent before splitting on ': '
            if line.startswith('    '):
                key, value = line[4:].split(': ', 1)
                info[key] = value
            else:
                # New job id line: flush the previous job, if any
                if job_id is not None:
                    yield job_id, info
                job_id = line
                info = {}
        # Flush the last job
        if job_id is not None:
            yield job_id, info
def cleanup(self, kill=False):
queue = self._get_queue()
if queue is not None:
# Kill jobs
for job_id, info in self.list():
if info['status'] == 'running':
if not kill:
raise JobStillRunning("Can't cleanup, some jobs are "
"still running")
else:
logger.info("Killing running job %s", job_id)
self.kill(job_id)
# Remove queue
logger.info("Removing queue at %s", queue)
self.check_call('rm -rf -- %s' % shell_escape(queue))
# Remove links
for link in self._links:
self.check_call('rm -rf -- %s' % shell_escape(link))
return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.