code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet

# One default-sized window showing a single static image.
window = pyglet.window.Window()

# pyglet.resource locates the file relative to this script's directory.
kitten = pyglet.resource.image('kitten.jpg')


@window.event
def on_draw():
    # Repaint: clear, then blit the image at the lower-left corner.
    window.clear()
    kitten.blit(0, 0)


pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Demonstrates a useful pattern for pyglet applications: subclassing Window.
'''
import pyglet
class HelloWorldWindow(pyglet.window.Window):
    """A window that owns and draws its own 'Hello, world!' label."""

    def __init__(self):
        super(HelloWorldWindow, self).__init__()
        # The label lives on the instance so on_draw can reach it.
        self.label = pyglet.text.Label('Hello, world!')

    def on_draw(self):
        # Standard repaint: wipe the frame, then render the label.
        self.clear()
        self.label.draw()
if __name__ == '__main__':
    # Construct the window, then hand control to pyglet's event loop.
    window = HelloWorldWindow()
    pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load and display a GIF animation.
Usage::
animation.py [<filename>]
If the filename is omitted, a sample animation is loaded
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
# The dinosaur.gif file packaged alongside this script is in the public
# domain, it was obtained from http://www.gifanimations.com/.
import sys
import pyglet

if len(sys.argv) > 1:
    # Load the animation from the file path given on the command line and
    # pack its frames into a shared texture atlas.
    animation = pyglet.image.load_animation(sys.argv[1])
    # Renamed from 'bin': that name shadowed the builtin bin().
    texture_bin = pyglet.image.atlas.TextureBin()
    animation.add_to_texture_bin(texture_bin)
else:
    # Load the sample animation bundled alongside this script.
    animation = pyglet.resource.animation('dinosaur.gif')

sprite = pyglet.sprite.Sprite(animation)

# Size the window to exactly fit the sprite.
window = pyglet.window.Window(width=sprite.width, height=sprite.height)

# Set window background color to white.
pyglet.gl.glClearColor(1, 1, 1, 1)


@window.event
def on_draw():
    window.clear()
    sprite.draw()


pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet
from pyglet.window import key
from pyglet.window import mouse

window = pyglet.window.Window()


@window.event
def on_key_press(symbol, modifiers):
    # Report a few interesting keys.  Converted from Python 2 print
    # statements to print() calls (valid in both Python 2 and 3).
    if symbol == key.A:
        print('The "A" key was pressed.')
    elif symbol == key.LEFT:
        print('The left arrow key was pressed.')
    elif symbol == key.ENTER:
        print('The enter key was pressed.')


@window.event
def on_mouse_press(x, y, button, modifiers):
    if button == mouse.LEFT:
        print('The left mouse button was pressed.')


@window.event
def on_draw():
    window.clear()


pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet

window = pyglet.window.Window()

# Anchor the text at its own centre and position that at the window centre.
center_x = window.width // 2
center_y = window.height // 2
label = pyglet.text.Label('Hello, world',
                          font_name='Times New Roman', font_size=36,
                          x=center_x, y=center_y,
                          anchor_x='center', anchor_y='center')


@window.event
def on_draw():
    window.clear()
    label.draw()


pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet

window = pyglet.window.Window()
image = pyglet.resource.image('kitten.jpg')


def on_draw():
    # Repaint handler: clear the frame and draw the image at the origin.
    window.clear()
    image.blit(0, 0)


# Register the handler explicitly instead of via the decorator form.
window.event(on_draw)
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet

window = pyglet.window.Window()

# Centre-anchored caption placed at the middle of the window.
label = pyglet.text.Label('Hello, world',
                          font_name='Times New Roman',
                          font_size=36,
                          x=window.width // 2,
                          y=window.height // 2,
                          anchor_x='center',
                          anchor_y='center')


def on_draw():
    window.clear()
    label.draw()


# Explicit registration; equivalent to decorating on_draw with @window.event.
window.event(on_draw)
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load and display a GIF animation.
Usage::
animation.py [<filename>]
If the filename is omitted, a sample animation is loaded
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
# The dinosaur.gif file packaged alongside this script is in the public
# domain, it was obtained from http://www.gifanimations.com/.
import sys
import pyglet

if len(sys.argv) > 1:
    # The animation comes from an explicit file path on the command line.
    animation = pyglet.image.load_animation(sys.argv[1])
    # NOTE: renamed from 'bin', which shadowed the builtin of that name.
    texture_bin = pyglet.image.atlas.TextureBin()
    animation.add_to_texture_bin(texture_bin)
else:
    # Fall back to the sample animation in this script's directory.
    animation = pyglet.resource.animation('dinosaur.gif')

sprite = pyglet.sprite.Sprite(animation)

# Size the window so the sprite fills it exactly.
window = pyglet.window.Window(width=sprite.width, height=sprite.height)

# Set window background color to white.
pyglet.gl.glClearColor(1, 1, 1, 1)


@window.event
def on_draw():
    window.clear()
    sprite.draw()


pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Demonstrates a useful pattern for pyglet applications: subclassing Window.
'''
import pyglet
class HelloWorldWindow(pyglet.window.Window):
def __init__(self):
super(HelloWorldWindow, self).__init__()
self.label = pyglet.text.Label('Hello, world!')
def on_draw(self):
self.clear()
self.label.draw()
if __name__ == '__main__':
window = HelloWorldWindow()
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''A full-screen minute:second timer. Leave it in charge of your conference
lighting talks.
After 5 minutes, the timer goes red. This limit is easily adjustable by
hacking the source code.
Press spacebar to start, stop and reset the timer.
'''
import pyglet
# Full-screen window; the Timer label centres itself on its dimensions.
window = pyglet.window.Window(fullscreen=True)
class Timer(object):
    """A start/stop/reset minute:second timer shown as one large label."""

    def __init__(self):
        # Centre the read-out in the (fullscreen) window.
        self.label = pyglet.text.Label('00:00', font_size=360,
                                       x=window.width // 2,
                                       y=window.height // 2,
                                       anchor_x='center', anchor_y='center')
        self.reset()

    def reset(self):
        """Return to 00:00, stopped, in the normal (white) colour."""
        self.time = 0
        self.running = False
        self.label.text = '00:00'
        self.label.color = (255, 255, 255, 255)

    def update(self, dt):
        """Clock tick: accumulate elapsed time and refresh the read-out."""
        if not self.running:
            return
        self.time += dt
        minutes, seconds = divmod(self.time, 60)
        self.label.text = '%02d:%02d' % (minutes, seconds)
        # Past the five-minute limit the display turns red.
        if minutes >= 5:
            self.label.color = (180, 0, 0, 255)
@window.event
def on_key_press(symbol, modifiers):
    # Space drives the timer's start/stop/reset cycle; Escape quits.
    if symbol == pyglet.window.key.SPACE:
        if timer.running:
            # Running -> pause.
            timer.running = False
        elif timer.time > 0:
            # Paused part-way -> reset back to zero.
            timer.reset()
        else:
            # At zero -> start counting.
            timer.running = True
    elif symbol == pyglet.window.key.ESCAPE:
        window.close()
timer = Timer()


@window.event
def on_draw():
    window.clear()
    timer.label.draw()


# Drive the timer with one tick per second.
pyglet.clock.schedule_interval(timer.update, 1)
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Demonstrates how to handle a platform-specific event not defined in
pyglet by subclassing Window. This is not for the faint-hearted!
A message will be printed to stdout when the following events are caught:
- On Mac OS X, the window drag region is clicked.
- On Windows, the display resolution is changed.
- On Linux, the window properties are changed.
'''
import pyglet
# Check for Carbon (OS X)
try:
    from pyglet.window.carbon import *
except ImportError:
    _have_carbon = False
else:
    _have_carbon = True

# Check for Win32
try:
    from pyglet.window.win32 import *
    from pyglet.window.win32.constants import *
except ImportError:
    _have_win32 = False
else:
    _have_win32 = True

# Check for Xlib (Linux)
try:
    from pyglet.window.xlib import *
except ImportError:
    _have_xlib = False
else:
    _have_xlib = True
# Subclass Window
class MyWindow(pyglet.window.Window):
    """Window subclass hooking one platform-specific native event per OS.

    Each handler is only defined when its platform module imported
    successfully (see the _have_* flags above).  Converted from Python 2
    print statements to print() calls.
    """

    if _have_carbon:
        @CarbonEventHandler(kEventClassWindow, kEventWindowClickDragRgn)
        def _on_window_click_drag_rgn(self, next_handler, event, data):
            # Report the click, then let Carbon continue normal handling.
            print('Clicked drag rgn.')
            carbon.CallNextEventHandler(next_handler, event)
            return noErr

    if _have_win32:
        @Win32EventHandler(WM_DISPLAYCHANGE)
        def _on_window_display_change(self, msg, lParam, wParam):
            print('Display resolution changed.')
            return 0

    if _have_xlib:
        @XlibEventHandler(xlib.PropertyNotify)
        def _on_window_property_notify(self, event):
            print('Property notify.')
if __name__ == '__main__':
    # Build the subclassed window, then enter the event loop.
    window = MyWindow()
    pyglet.app.run()
| Python |
#!/usr/bin/env python
'''Graphically show all devices available via the pyglet.input interface.
Each device is shown in its own collapsed panel. Click on a device panel
to expand it, revealing that device's controls. The controls show the
current live values, and flash white when the value changes.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
from pyglet import gl
class LineGroup(pyglet.graphics.OrderedGroup):
    """Ordered group that rasterises its quads as wireframe outlines."""

    def set_state(self):
        # Switch to line (outline) polygon mode while this group draws.
        gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)

    def unset_state(self):
        # Restore the default filled polygon mode afterwards.
        gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
class Box(object):
    """An axis-aligned rectangle drawn as a filled quad plus an outline.

    The fill quad renders in sub-group 0 and the stroke quad in a
    LineGroup at order 1, so the outline draws over the fill.
    """

    def __init__(self, batch, group=None,
                 stroke_color=(255, 255, 255, 255),
                 fill_color=(200, 200, 200, 255)):
        # Last bounds passed to set_bounds(); start degenerate at origin.
        self.x1 = 0
        self.y1 = 0
        self.x2 = 0
        self.y2 = 0
        self.fill_vertices = batch.add(4, gl.GL_QUADS,
                                       pyglet.graphics.OrderedGroup(0, group),
                                       'v2f', ('c4B', fill_color * 4))
        self.stroke_vertices = batch.add(4, gl.GL_QUADS,
                                         LineGroup(1, group),
                                         'v2f', ('c4B', stroke_color * 4))

    def set_bounds(self, x1, y1, x2, y2):
        """Move/resize the box to the rectangle (x1, y1)-(x2, y2)."""
        # BUG FIX: these previously zeroed the corners instead of storing
        # the parameters, so the box never remembered its own bounds.
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.fill_vertices.vertices[:] = (x1, y1, x2, y1, x2, y2, x1, y2)
        # BUG FIX: the stroke quad's last corner was x1-1, leaving the
        # outline misaligned with the fill quad.
        self.stroke_vertices.vertices[:] = (x1, y1, x2, y1, x2, y2, x1, y2)

    def set_fill(self, r, g, b):
        """Recolour the fill (alpha stays opaque)."""
        self.fill_vertices.colors[:] = (r, g, b, 255) * 4

    def delete(self):
        """Remove both quads from the batch."""
        self.fill_vertices.delete()
        self.stroke_vertices.delete()
class DevicePanel(object):
    """A collapsible panel showing one input device and its controls.

    Draws a titled box; when opened (toggle/open) the device is opened and
    one widget per control is laid out inside the box, flowing into rows.
    Converted from Python 2 ``except E, e`` / ``print e`` syntax.
    """

    BORDER_MARGIN = 5
    CONTENT_MARGIN = 8

    def __init__(self, device):
        self.device = device
        self.box = Box(batch, group=background_group,
                       stroke_color=(0, 0, 200, 255),
                       fill_color=(200, 200, 255, 255))
        self.name_label = pyglet.text.Label(device.name or 'Unknown device',
                                            font_size=10,
                                            color=(0, 0, 0, 255),
                                            anchor_y='top',
                                            batch=batch, group=text_group)
        self.manufacturer_label = pyglet.text.Label(device.manufacturer or '',
                                                    font_size=10,
                                                    color=(0, 0, 0, 255),
                                                    anchor_x='right',
                                                    anchor_y='top',
                                                    batch=batch,
                                                    group=text_group)
        self.is_open = False
        self.widgets = []

    def set_bounds(self, left, right, top):
        """Set the horizontal extent and top edge; bottom is computed."""
        self.left = left
        self.right = right
        self.top = top
        self.layout()

    def layout_widgets(self):
        # Flow the widgets left-to-right, wrapping into rows that fit the
        # panel's content width.  The widget list ends with a None sentinel
        # (appended in open()) that flushes the final row.
        max_row_width = self.right - self.left - self.CONTENT_MARGIN * 2
        row = []
        row_width = 0
        row_height = 0

        def layout_row(row, x1, y1, x2, y2):
            # Place one finished row of widgets at its final coordinates.
            x = x1
            for widget in row:
                widget.set_bounds(x,
                                  y1,
                                  x + widget.min_width,
                                  y1 + widget.min_height)
                x += widget.min_width

        y = self.bottom + self.CONTENT_MARGIN
        for widget in self.widgets:
            if widget is None or row_width + widget.min_width > max_row_width:
                layout_row(row,
                           self.left + self.CONTENT_MARGIN,
                           y - row_height,
                           self.right - self.CONTENT_MARGIN,
                           y)
                row = []
                y -= row_height
                row_width = 0
                if widget is None:
                    break
            row.append(widget)
            row_width += widget.min_width
            row_height = max(row_height, widget.min_height)
        self.bottom = y - self.CONTENT_MARGIN

    def layout(self):
        # Title bar height determines the collapsed bottom edge; opening
        # the panel extends the bottom to fit the control widgets.
        self.title_bottom = self.top - \
            self.name_label.content_height - self.CONTENT_MARGIN * 2
        self.bottom = self.title_bottom
        if self.is_open:
            self.layout_widgets()
        self.box.set_bounds(self.left + self.BORDER_MARGIN,
                            self.bottom + self.BORDER_MARGIN,
                            self.right - self.BORDER_MARGIN,
                            self.top - self.BORDER_MARGIN)
        self.name_label.x = self.left + self.CONTENT_MARGIN
        self.name_label.y = self.top - self.CONTENT_MARGIN
        self.manufacturer_label.x = self.right - self.CONTENT_MARGIN
        self.manufacturer_label.y = self.top - self.CONTENT_MARGIN

    def hit_test(self, x, y):
        """True if (x, y) falls within the panel's title bar."""
        return self.left < x < self.right and self.title_bottom < y < self.top

    def toggle(self):
        if self.is_open:
            self.close()
        else:
            self.open()

    def open(self):
        """Open the device and build a widget for each of its controls."""
        if self.is_open:
            return
        try:
            self.device.open()
        except pyglet.input.DeviceException:
            # Some devices need a window for (exclusive) access; retry.
            try:
                self.device.open(window)
            except pyglet.input.DeviceException as e:
                print(e)  # TODO show error
                return
        window.set_mouse_cursor(window.get_system_mouse_cursor('wait'))
        for control in self.device.get_controls():
            if isinstance(control, pyglet.input.Button):
                widget = ButtonWidget(control, batch, group=text_group)
            else:
                widget = ControlWidget(control, batch, group=text_group)
            self.widgets.append(widget)
        if not self.widgets:
            self.widgets.append(NoControlsWidget(batch, group=text_group))
        # Sentinel that forces layout_widgets() to flush its last row.
        self.widgets.append(None)
        window.set_mouse_cursor(None)
        self.is_open = True

    def close(self):
        """Tear down the widgets and release the device."""
        if not self.is_open:
            return
        for widget in self.widgets:
            if widget:
                widget.delete()
        del self.widgets[:]
        self.device.close()
        self.is_open = False
class ControlWidget(object):
    """Shows one input control as a boxed name/value pair.

    The box flashes white when the value changes and fades back to grey
    over subsequent update() calls.
    """

    BORDER_MARGIN = 2
    CONTENT_MARGIN = 4

    def __init__(self, control, batch, group=None):
        self.control_name = control.name
        if not self.control_name:
            self.control_name = control.raw_name
        self.box = Box(batch, pyglet.graphics.OrderedGroup(0, group))
        self.name_label = pyglet.text.Label(self.control_name,
                                            font_size=10,
                                            anchor_x='left',
                                            anchor_y='bottom',
                                            color=(0, 0, 0, 255),
                                            batch=batch,
                                            group=pyglet.graphics.OrderedGroup(1, group))
        self.value_label = pyglet.text.Label(' ',
                                             font_size=8,
                                             anchor_x='right',
                                             anchor_y='bottom',
                                             color=(0, 0, 0, 255),
                                             batch=batch,
                                             group=pyglet.graphics.OrderedGroup(1, group))
        # Minimum size: both labels side by side, plus padding.
        self.min_width = (self.name_label.content_width +
                          self.value_label.content_width +
                          self.CONTENT_MARGIN * 2)
        self.min_height = (self.name_label.content_height +
                           self.CONTENT_MARGIN * 2)
        self.relative = isinstance(control, pyglet.input.RelativeAxis)
        self.fade = 200
        self.control = control
        control.push_handlers(self)

    def set_bounds(self, x1, y1, x2, y2):
        """Place the widget inside the rectangle (x1, y1)-(x2, y2)."""
        border = self.BORDER_MARGIN
        self.box.set_bounds(x1 + border, y1 + border,
                            x2 - border, y2 - border)
        pad = self.CONTENT_MARGIN
        self.name_label.x = x1 + pad
        self.name_label.y = y1 + pad
        self.value_label.x = x2 - pad
        self.value_label.y = y1 + pad

    def delete(self):
        """Detach from the control and remove all graphics."""
        if self in changed_widgets:
            changed_widgets.remove(self)
        self.control.remove_handlers(self)
        self.name_label.delete()
        self.value_label.delete()
        self.box.delete()

    def on_change(self, value):
        # Record the new value and flash white; the actual redraw happens
        # later when update() runs on the main loop.
        self.value = value
        self.fade = 255
        changed_widgets.add(self)

    def update(self):
        self.value_label.text = str(self.value)
        if self.relative and self.value:
            # Relative axes never report returning to rest, so snap the
            # displayed value back to zero on the next pass.
            self.value = 0
            changed_widgets.add(self)
        self.box.set_fill(self.fade, self.fade, self.fade)
        if self.fade > 200:
            # Decay the flash back toward the resting grey.
            self.fade = max(200, self.fade - 10)
            changed_widgets.add(self)
class ButtonWidget(ControlWidget):
    """Widget for a button control: a centred name only, no value label.

    The box lights up (fade=255) while the button is pressed and fades
    back to grey after release.
    """
    BORDER_MARGIN = 2
    CONTENT_MARGIN = 4
    def __init__(self, control, batch, group=None):
        # Deliberately does not call ControlWidget.__init__: this widget
        # has no value label and no relative-axis handling.
        self.control_name = control.name
        if not self.control_name:
            self.control_name = control.raw_name
        self.box = Box(batch, pyglet.graphics.OrderedGroup(0, group))
        self.name_label = pyglet.text.Label(self.control_name,
            font_size=10,
            anchor_x='center',
            anchor_y='bottom',
            color=(0, 0, 0, 255),
            batch=batch,
            group=pyglet.graphics.OrderedGroup(1, group))
        self.min_width = self.name_label.content_width + self.CONTENT_MARGIN * 2
        self.min_height = self.name_label.content_height + self.CONTENT_MARGIN * 2
        self.fade = 200
        self.control = control
        control.push_handlers(self)
    def set_bounds(self, x1, y1, x2, y2):
        """Place the box inside the rectangle; centre the name label."""
        self.box.set_bounds(
            x1 + self.BORDER_MARGIN,
            y1 + self.BORDER_MARGIN,
            x2 - self.BORDER_MARGIN,
            y2 - self.BORDER_MARGIN)
        self.name_label.x = (x1 + x2) // 2
        self.name_label.y = y1 + self.CONTENT_MARGIN
    def delete(self):
        """Detach from the control and remove all graphics."""
        if self in changed_widgets:
            changed_widgets.remove(self)
        self.control.remove_handlers(self)
        self.name_label.delete()
        self.box.delete()
    def on_change(self, value):
        # Light up on press only; a release just lets the fade decay.
        self.value = value
        if value:
            self.fade = 255
        changed_widgets.add(self)
    def update(self):
        """Apply the fade to the box; keep fading while not pressed."""
        self.box.set_fill(self.fade, self.fade, self.fade)
        if not self.value and self.fade > 200:
            self.fade = max(200, self.fade - 10)
            changed_widgets.add(self)
class NoControlsWidget(object):
    """Placeholder row shown for a device that exposes no controls."""
    CONTENT_MARGIN = 4
    def __init__(self, batch, group):
        """Create the static informational label.

        :param batch: graphics batch the label is added to
        :param group: rendering group for the label
        """
        self.label = pyglet.text.Label('No controls on this device.',
            font_size=10,
            color=(0, 0, 0, 255),
            anchor_y='bottom',
            batch=batch,
            group=group)
        self.min_width = self.label.content_width + self.CONTENT_MARGIN * 2
        self.min_height = self.label.content_height + self.CONTENT_MARGIN * 2
    def set_bounds(self, x1, y1, x2, y2):
        """Position the label inside the given bounds (top/right unused).

        Uses this class's own CONTENT_MARGIN instead of reaching into
        ControlWidget as the original did; the values are equal (4), so
        behaviour is unchanged but the coupling is removed.
        """
        self.label.x = x1 + self.CONTENT_MARGIN
        self.label.y = y1 + self.CONTENT_MARGIN
    def delete(self):
        """Remove the label from its batch."""
        self.label.delete()
# Module-level scene setup: one resizable window, a single batch, and one
# DevicePanel per attached input device.
window = pyglet.window.Window(caption='Input Devices', resizable=True)
batch = pyglet.graphics.Batch()
background_group = pyglet.graphics.OrderedGroup(0)
text_group = pyglet.graphics.OrderedGroup(1)
panels = [DevicePanel(device) for device in pyglet.input.get_devices()]
# Help text; its y is set by layout_panels() below the last panel.
help_label = pyglet.text.Label(
    'Click on a device name to show or hide its controls.',
    x=DevicePanel.CONTENT_MARGIN,
    anchor_y='top',
    font_size=10,
    color=(255, 255, 255, 255),
    batch=batch,
    group=background_group)
def layout_panels():
    """Stack all device panels top-to-bottom from the window's top edge,
    then park the help label just beneath the last panel."""
    next_top = window.height
    for panel in panels:
        panel.set_bounds(left=0, right=window.width, top=next_top)
        next_top = panel.bottom
    help_label.y = next_top
@window.event
def on_draw():
    # Dark blue-grey background behind the panels.
    gl.glClearColor(0.3, 0.3, 0.4, 1.0)
    window.clear()
    batch.draw()
    # Frame rendered; clear the dirty flag set by the input handlers.
    window.invalid = False
@window.event
def on_resize(width, height):
    # Re-flow the panel stack for the new size, then let pyglet's default
    # on_resize run as well (EVENT_UNHANDLED keeps dispatch going).
    layout_panels()
    window.invalid = True
    return pyglet.event.EVENT_UNHANDLED
@window.event
def on_mouse_press(x, y, button, modifiers):
    """Expand/collapse any panel under the click and re-flow the layout.

    If nothing is hit, no re-layout or redraw is requested.
    """
    for panel in panels:
        if not panel.hit_test(x, y):
            continue
        panel.toggle()
        layout_panels()
        window.invalid = True
# Widgets that need a visual refresh on the next tick.
changed_widgets = set()
def update(dt):
    """Run deferred widget updates collected since the last tick.

    Widgets re-add themselves to changed_widgets while still fading, so
    the set is snapshotted and cleared before iterating.
    """
    dirty = list(changed_widgets)
    changed_widgets.clear()
    for widget in dirty:
        widget.update()
        window.invalid = True
pyglet.clock.schedule_interval(update, 0.05)
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Convert an image to another file format supported by pyglet.
Usage::
python image_convert.py <src-file> <dest-file>
'''
import sys
import pyglet
def convert(src, dest):
    """Convert the image file *src* to *dest*.

    The destination format is inferred from dest's extension by pyglet.
    DDS files hold pre-compressed texture data, so they are uploaded to
    the video card first and saved from the resulting texture; all other
    formats are loaded and saved directly.
    """
    # Test the file extension, not a substring anywhere in the path: the
    # original ('.dds' in src.lower()) also matched names like 'a.ddsy.png'.
    if src.lower().endswith('.dds'):
        # Compressed textures need to be uploaded to the video card before
        # they can be saved.
        texture = pyglet.image.load(src).get_texture()
        texture.save(dest)
    else:
        # Otherwise just save the loaded image in the new format.
        image = pyglet.image.load(src)
        image.save(dest)
if __name__ == '__main__':
    # Expect exactly two arguments: source and destination filenames.
    if len(sys.argv) != 3:
        # Python 2 print statement: show the module usage docstring.
        print __doc__
        sys.exit(1)
    src = sys.argv[1]
    dest = sys.argv[2]
    convert(src, dest)
| Python |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
window = pyglet.window.Window()
devices = pyglet.input.get_devices()
def watch_control(device, control):
    """Attach debug-print handlers to a single control.

    The @control.event decorator registers the handler under the
    decorated function's name, so the nested functions must be called
    on_change / on_press / on_release.
    """
    @control.event
    def on_change(value):
        print '%r: %r.on_change(%r)' % (device, control, value)
    # Buttons additionally dispatch press/release events.
    if isinstance(control, pyglet.input.base.Button):
        @control.event
        def on_press():
            print '%r: %r.on_press()' % (device, control)
        @control.event
        def on_release():
            print '%r: %r.on_release()' % (device, control)
print 'Devices:'
for device in devices:
    # Trailing comma: Python 2 print without newline, so 'OK'/'Fail'
    # appears on the same line as the device name.
    print ' ', device.name,
    try:
        device.open(window=window)
        print 'OK'
        for control in device.get_controls():
            print ' ', control.name
            watch_control(device, control)
    except pyglet.input.DeviceException:
        # Opening can fail (e.g. device already claimed); report and
        # carry on with the remaining devices.
        print 'Fail'
pyglet.app.run()
| Python |
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
import sys
window = pyglet.window.Window()
@window.event
def on_draw():
    window.clear()
# The Apple IR remote is only present on some Apple hardware; exit
# gracefully everywhere else.
remote = pyglet.input.get_apple_remote()
if not remote:
    print 'Apple IR Remote not available.'
    sys.exit(0)
# NOTE(review): exclusive=True presumably prevents other applications
# from also acting on the remote while this window holds it — confirm
# against the pyglet input documentation.
remote.open(window, exclusive=True)
# pyglet registers handlers by function name, so each block below
# deliberately reuses the names on_press / on_release; the decorator
# binds each one to a different control.
@remote.select_control.event
def on_press():
    print 'Press select'
@remote.menu_control.event
def on_press():
    print 'Press menu'
@remote.up_control.event
def on_press():
    print 'Press up'
@remote.down_control.event
def on_press():
    print 'Press down'
@remote.left_control.event
def on_press():
    print 'Press left'
@remote.right_control.event
def on_press():
    print 'Press right'
@remote.select_control.event
def on_release():
    print 'Release select'
@remote.menu_control.event
def on_release():
    print 'Release menu'
@remote.up_control.event
def on_release():
    print 'Release up'
@remote.down_control.event
def on_release():
    print 'Release down'
@remote.left_control.event
def on_release():
    print 'Release left'
@remote.right_control.event
def on_release():
    print 'Release right'
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Demonstrates one way of fixing the display resolution to a certain
size, but rendering to the full screen.
The method used in this example is:
1. Set the OpenGL viewport to the fixed resolution
2. Render the scene using any OpenGL functions (here, just a polygon)
3. Copy the framebuffer into a texture
4. Reset the OpenGL viewport to the window (full screen) size
5. Blit the texture to the framebuffer
Recent video cards could also render the scene directly to the texture
using EXT_framebuffer_object. (This is not demonstrated in this example).
'''
from pyglet.gl import *
import pyglet
# Create a fullscreen window using the user's desktop resolution. You can
# also use this technique on ordinary resizable windows.
window = pyglet.window.Window(fullscreen=True)
# Use 320x200 fixed resolution to make the effect completely obvious. You
# can change this to a more reasonable value such as 800x600 here.
target_resolution = 320, 200
class FixedResolutionViewport(object):
    """Renders the scene at a fixed off-screen resolution, then scales
    the result to fill the window, letterboxed to preserve aspect ratio.

    Usage: call begin(), draw the scene, then end().
    """
    def __init__(self, window, width, height, filtered=False):
        # :param filtered: True keeps bilinear filtering on scale-up;
        #     False selects GL_NEAREST (better for pixel art).
        self.window = window
        self.width = width
        self.height = height
        self.texture = pyglet.image.Texture.create(width, height,
            rectangle=True)
        if not filtered:
            # By default the texture will be bilinear filtered when scaled
            # up. If requested, turn filtering off. This makes the image
            # aliased, but is more suitable for pixel art.
            glTexParameteri(self.texture.target,
                GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameteri(self.texture.target,
                GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    def begin(self):
        # Restrict rendering to the fixed-size region of the framebuffer.
        glViewport(0, 0, self.width, self.height)
        self.set_fixed_projection()
    def end(self):
        # Copy the rendered framebuffer region into the texture ...
        buffer = pyglet.image.get_buffer_manager().get_color_buffer()
        self.texture.blit_into(buffer, 0, 0, 0)
        # ... then switch back to the full window and draw it scaled.
        glViewport(0, 0, self.window.width, self.window.height)
        self.set_window_projection()
        aspect_width = self.window.width / float(self.width)
        aspect_height = self.window.height / float(self.height)
        # Scale by the smaller factor so the whole image fits (letterbox).
        if aspect_width > aspect_height:
            scale_width = aspect_height * self.width
            scale_height = aspect_height * self.height
        else:
            scale_width = aspect_width * self.width
            scale_height = aspect_width * self.height
        # Centre the scaled image in the window.
        x = (self.window.width - scale_width) / 2
        y = (self.window.height - scale_height) / 2
        glClearColor(0, 0, 0, 1)
        glClear(GL_COLOR_BUFFER_BIT)
        glLoadIdentity()
        glColor3f(1, 1, 1)
        self.texture.blit(x, y, width=scale_width, height=scale_height)
    def set_fixed_projection(self):
        # Override this method if you need to change the projection of the
        # fixed resolution viewport.
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0, self.width, 0, self.height, -1, 1)
        glMatrixMode(GL_MODELVIEW)
    def set_window_projection(self):
        # This is the same as the default window projection, reprinted here
        # for clarity.
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(0, self.window.width, 0, self.window.height, -1, 1)
        glMatrixMode(GL_MODELVIEW)
target_width, target_height = target_resolution
viewport = FixedResolutionViewport(window,
target_width, target_height, filtered=False)
def draw_scene():
    '''Draw the scene, assuming the fixed resolution viewport and projection
    have been set up. This just draws the rotated polygon.'''
    glClearColor(1, 1, 1, 1)
    glClear(GL_COLOR_BUFFER_BIT)
    glLoadIdentity()
    w, h = target_resolution
    # Rotate about the centre of the fixed-resolution scene.
    glTranslatef(w//2, h//2, 0)
    glRotatef(rotate, 0, 0, 1)
    glColor3f(1, 0, 0)
    # Red square with half-side one third of the smaller dimension.
    s = min(w, h) // 3
    glRectf(-s, -s, s, s)
# Current rotation of the square, in degrees.
rotate = 0
def update(dt):
    """Advance the square's rotation by 20 degrees per second."""
    global rotate
    rotate = rotate + dt * 20
pyglet.clock.schedule_interval(update, 1/60.)
@window.event
def on_draw():
    # Render into the fixed-resolution viewport; end() then scales the
    # result up to the full window.
    viewport.begin()
    window.clear()
    draw_scene()
    viewport.end()
pyglet.app.run()
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import math
import os
import pyglet
from pyglet.gl import *
import reader
pyglet.resource.path.append('res')
pyglet.resource.reindex()
# Check for AVbin
try:
from pyglet.media import avbin
except ImportError:
raise ImportError('AVbin is required for this example, see '
'http://code.google.com/p/avbin')
def disc(r, x, y, slices=20, start=2*math.pi*0, end=2*math.pi):
    """Draw a filled disc (or pie sector) of radius r centred on (x, y).

    The triangle fan uses `slices` rim vertices spread from `start` to
    `end` (radians, inclusive at both ends).
    """
    step = (end - start) / (slices - 1)
    rim = [(x + r * math.cos(i * step + start),
            y + r * math.sin(i * step + start)) for i in range(slices)]
    verts = [(x, y)] + rim
    verts = ((GLfloat * 2) * len(verts))(*verts)
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
    glEnableClientState(GL_VERTEX_ARRAY)
    glVertexPointer(2, GL_FLOAT, 0, verts)
    glDrawArrays(GL_TRIANGLE_FAN, 0, len(verts))
    glPopClientAttrib()
def circle(r, x, y, slices=20):
    """Draw the outline of a circle of radius r centred on (x, y)."""
    step = 2 * math.pi / slices
    verts = [(x + r * math.cos(i * step),
              y + r * math.sin(i * step)) for i in range(slices)]
    verts = ((GLfloat * 2) * len(verts))(*verts)
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
    glEnableClientState(GL_VERTEX_ARRAY)
    glVertexPointer(2, GL_FLOAT, 0, verts)
    glDrawArrays(GL_LINE_LOOP, 0, len(verts))
    glPopClientAttrib()
def orientation_angle(orientation):
    """Return the angle (radians) of an (x, y, z) direction vector
    projected onto the XZ plane."""
    ox = orientation[0]
    oz = orientation[2]
    return math.atan2(oz, ox)
class Handle(object):
    """Base class for draggable editor handles attached to a player.

    Subclasses supply pos() (world position) and a class-level radius.
    """
    tip = ''
    def __init__(self, player):
        self.player = player
    def hit_test(self, x, y, z):
        """Return the offset from (x, y, z) to this handle's centre, or
        None when the point lies outside the handle's radius."""
        hx, hy, hz = self.pos()
        dx = hx - x
        dy = hy - y
        dz = hz - z
        if dx * dx + dy * dy + dz * dz < self.radius * self.radius:
            return -dx, -dy, -dz
    def draw(self):
        pass
    def begin_drag(self, window, offset):
        """Remember the window and grab offset; return self so the caller
        can push this handle as the active event handler."""
        self.win = window
        self.offset = offset
        return self
    def on_mouse_press(self, x, y, button, modifiers):
        self.win.remove_handlers(self)
    def on_mouse_release(self, x, y, button, modifiers):
        self.win.remove_handlers(self)
class LabelHandle(Handle):
    """Non-interactive handle that draws the player's 'label' text just
    below the player's position."""
    def __init__(self, player):
        super(LabelHandle, self).__init__(player)
        self.text = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
            anchor_y='top', anchor_x='center')
    def hit_test(self, x, y, z):
        # Labels are never clickable.
        return None
    def draw(self):
        # Only players that have been given a 'label' attribute get text.
        if hasattr(self.player, 'label'):
            x, _, y = self.player.position
            # ech. fudge scale back to 1
            mat = (GLfloat * 16)()
            glGetFloatv(GL_MODELVIEW_MATRIX, mat)
            glPushMatrix()
            glTranslatef(x, y, 0)
            # Divide out the modelview's diagonal scale so the text
            # renders at its natural pixel size.
            glScalef(1/mat[0], 1/mat[5], 1/mat[10])
            glTranslatef(0, -5, 0)
            self.text.text = self.player.label
            self.text.draw()
            glPopMatrix()
class PositionHandle(Handle):
    """Handle for dragging the player's position (red triangle)."""
    tip = 'position'
    radius = .3
    def draw(self):
        glPushMatrix()
        # Screen y is world z: the view is a top-down XZ plane.
        glTranslatef(self.player.position[0], self.player.position[2], 0)
        glColor3f(1, 0, 0)
        glBegin(GL_TRIANGLES)
        # Triangle inscribed in the handle radius, apex pointing up.
        glVertex2f(0, self.radius)
        glVertex2f(-self.radius * math.sqrt(3) / 2, -.5 * self.radius)
        glVertex2f(self.radius * math.sqrt(3) / 2, -.5 * self.radius)
        glEnd()
        glPopMatrix()
    def pos(self):
        return self.player.position
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Subtract the initial grab offset so the handle doesn't jump to
        # the cursor on the first drag event.
        pos = self.win.mouse_transform(x, y)
        self.player.position = \
            (pos[0] - self.offset[0],
             pos[1] - self.offset[1],
             pos[2] - self.offset[2])
class OrientationHandle(Handle):
    """Handle for dragging a direction vector, drawn as a yellow knob on
    a dashed leader line from the player's position.

    Subclasses supply get_orientation()/set_orientation().
    """
    radius = .1
    length = 1.5
    def pos(self):
        """World position of the knob: the player position plus the
        normalised orientation scaled to self.length."""
        x, _, z = self.player.position
        dir = self.get_orientation()
        # `or 1` substitutes 1 for a zero-length orientation, so sz can
        # never be zero here; the original's following `if sz != 0:`
        # test was dead code and has been removed.
        sz = math.sqrt(dir[0] ** 2 + dir[1] ** 2 + dir[2] ** 2) or 1
        x += dir[0] / sz * self.length
        z += dir[2] / sz * self.length
        return x, 0, z
    def draw(self):
        glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
        px, _, py = self.player.position
        x, _, y = self.pos()
        # Dashed line
        glColor3f(.3, .3, .3)
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, 0x7777)
        glBegin(GL_LINES)
        glVertex2f(px, py)
        glVertex2f(x, y)
        glEnd()
        # This handle (orientation)
        glColor3f(1, 1, 0)
        disc(self.radius, x, y)
        glPopAttrib()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # New orientation is the vector from the player position to the
        # offset-corrected mouse point.
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        self.set_orientation(
            (hx - self.offset[0] - px,
             hy - self.offset[1] - py,
             hz - self.offset[2] - pz))
class ConeOrientationHandle(OrientationHandle):
    """Drags the player's sound-cone direction vector."""
    tip = 'cone_orientation'
    def get_orientation(self):
        return self.player.cone_orientation
    def set_orientation(self, orientation):
        self.player.cone_orientation = orientation
class ForwardOrientationHandle(OrientationHandle):
    """Drags the forward orientation (used for the listener)."""
    tip = 'forward_orientation'
    def get_orientation(self):
        return self.player.forward_orientation
    def set_orientation(self, orientation):
        self.player.forward_orientation = orientation
class ConeAngleHandle(Handle):
    """Base for handles that drag a cone angle: a knob on the cone edge
    plus a translucent filled sector.

    Subclasses define length, color, fill_color and the
    get_angle()/set_angle() accessors (angles in degrees).
    """
    radius = .1
    def pos(self):
        # The knob sits half the cone angle away from the cone axis, at
        # distance self.length from the player.
        px, py, pz = self.player.position
        angle = orientation_angle(self.player.cone_orientation)
        angle += self.get_angle() * math.pi / 180. / 2
        x = math.cos(angle) * self.length
        z = math.sin(angle) * self.length
        return px + x, py, pz + z
    def draw(self):
        glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
        # Fill
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glColor4f(*self.fill_color)
        px, _, py = self.player.position
        angle = orientation_angle(self.player.cone_orientation)
        a = self.get_angle() * math.pi / 180.
        # Sector spanning the full cone angle, centred on the cone axis.
        disc(self.length, px, py,
            start=angle - a/2,
            end=angle + a/2)
        # Handle
        x, _, y = self.pos()
        glColor4f(*self.color)
        disc(self.radius, x, y)
        glPopAttrib()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        angle = orientation_angle(self.player.cone_orientation)
        hangle = orientation_angle((hx - px, hy - py, hz - pz))
        # Normalise so the mouse angle is always ahead of the axis angle.
        if hangle < angle:
            hangle += math.pi * 2
        # Cone angle is twice the axis-to-edge angle, clamped to 0..2pi
        # radians (0..360 degrees).
        res = min(max((hangle - angle) * 2, 0), math.pi * 2)
        self.set_angle(res * 180. / math.pi)
class ConeInnerAngleHandle(ConeAngleHandle):
    """Drags the cone's inner angle (green, drawn at radius 1)."""
    tip = 'cone_inner_angle'
    length = 1.
    color = (.2, .8, .2, 1)
    fill_color = (0, 1, 0, .1)
    def get_angle(self):
        return self.player.cone_inner_angle
    def set_angle(self, angle):
        self.player.cone_inner_angle = angle
class ConeOuterAngleHandle(ConeAngleHandle):
    """Drags the cone's outer angle (blue, drawn at radius 1.2 so it
    sits outside the inner-angle handle)."""
    tip = 'cone_outer_angle'
    length = 1.2
    color = (.2, .2, .8, 1)
    fill_color = (0, 0, 1, .1)
    def get_angle(self):
        return self.player.cone_outer_angle
    def set_angle(self, angle):
        self.player.cone_outer_angle = angle
class MoreHandle(Handle):
    """'+' button that expands into a panel of extra sliders (volume,
    min/max distance, cone outer gain) for one player."""
    tip = 'More...'
    radius = .2
    open = False
    open_width = 1.5
    open_height = 1.5
    def pos(self):
        # Fixed (+1, +1) offset from the player position on the XZ plane.
        x, y, z = self.player.position
        return x + 1, y, z + 1
    def draw(self):
        x, _, z = self.pos()
        if self.open:
            # Expanded: translucent panel background plus black outline.
            x -= .2
            z += .2
            glPushAttrib(GL_ENABLE_BIT)
            glEnable(GL_BLEND)
            glColor4f(1, 1, 1, .8)
            glBegin(GL_QUADS)
            glVertex2f(x, z)
            glVertex2f(x + self.open_width, z)
            glVertex2f(x + self.open_width, z - self.open_height)
            glVertex2f(x, z - self.open_height)
            glEnd()
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            glColor4f(0, 0, 0, 1)
            glBegin(GL_QUADS)
            glVertex2f(x, z)
            glVertex2f(x + self.open_width, z)
            glVertex2f(x + self.open_width, z - self.open_height)
            glVertex2f(x, z - self.open_height)
            glEnd()
            glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
            glPopAttrib()
        else:
            # Collapsed: white disc with a black '+' cross.
            glColor3f(1, 1, 1)
            disc(self.radius, x, z)
            glColor3f(0, 0, 0)
            circle(self.radius, x, z)
            r = self.radius - 0.1
            glBegin(GL_LINES)
            glVertex2f(x - r, z)
            glVertex2f(x + r, z)
            glVertex2f(x, z - r)
            glVertex2f(x, z + r)
            glEnd()
    def begin_drag(self, window, offset):
        # A click opens the panel rather than starting a real drag.
        self.open = True
        self.win = window
        self.win.set_more_player_handles(self.player)
        return self
    def on_mouse_press(self, x, y, button, modifiers):
        # A press outside all of the extra handles closes the panel.
        x, y, z = self.win.mouse_transform(x, y)
        for handle in self.win.more_handles:
            if handle.hit_test(x, y, z):
                return
        self.win.set_more_player_handles(None)
        self.win.remove_handlers(self)
        self.open = False
    def on_mouse_release(self, x, y, button, modifiers):
        # Unlike the base Handle, stay attached until a press closes us.
        pass
class SliderHandle(Handle):
    """Horizontal slider rendered at a fixed (x, z) offset from the
    player position.

    Subclasses supply get_value()/set_value() mapping to a 0..1 range.
    """
    length = 1.
    width = .05
    radius = .1
    def __init__(self, player, x, z):
        super(SliderHandle, self).__init__(player)
        # Offset of the groove's left end, relative to the player.
        self.x = x
        self.z = z
    def pos(self):
        # Thumb position along the groove, proportional to the value.
        x, y, z = self.player.position
        x += self.x + self.get_value() * self.length
        z += self.z
        return x, y, z
    def draw(self):
        x = self.x + self.player.position[0]
        z = self.z + self.player.position[2]
        # Groove
        glColor3f(.5, .5, .5)
        glBegin(GL_QUADS)
        glVertex2f(x, z - self.width/2)
        glVertex2f(x + self.length, z - self.width/2)
        glVertex2f(x + self.length, z + self.width/2)
        glVertex2f(x, z + self.width/2)
        glEnd()
        # Thumb
        x, _, z = self.pos()
        glColor3f(.2, .2, .2)
        disc(self.radius, x, z)
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Map the mouse x back onto the groove, clamped to 0..1.
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        value = float(hx - px - self.x) / self.length
        value = min(max(value, 0), 1)
        self.set_value(value)
class VolumeHandle(SliderHandle):
    """Slider for a player's volume (0..1)."""
    tip = 'volume'
    def __init__(self, player):
        super(VolumeHandle, self).__init__(player, 1, .9)
    def get_value(self):
        return self.player.volume
    def set_value(self, value):
        self.player.volume = value
class ListenerVolumeHandle(SliderHandle):
    """Volume slider for the listener, placed below-left of it."""
    tip = 'volume'
    def __init__(self, player):
        super(ListenerVolumeHandle, self).__init__(player, -.5, -1)
    def get_value(self):
        return self.player.volume
    def set_value(self, value):
        self.player.volume = value
class MinDistanceHandle(SliderHandle):
    """Slider mapping 0..1 onto a min_distance of 0..5 world units."""
    tip = 'min_distance'
    def __init__(self, player):
        super(MinDistanceHandle, self).__init__(player, 1, .6)
    def get_value(self):
        return self.player.min_distance / 5.
    def set_value(self, value):
        self.player.min_distance = value * 5.
class MaxDistanceHandle(SliderHandle):
    """Slider mapping 0..1 onto a max_distance of 0..5 world units.

    The displayed value is clamped to the slider's end even when the
    player's max_distance exceeds 5.
    """
    tip = 'max_distance'
    def __init__(self, player):
        super(MaxDistanceHandle, self).__init__(player, 1, .3)
    def get_value(self):
        return min(self.player.max_distance / 5., 1.0)
    def set_value(self, value):
        self.player.max_distance = value * 5.
class ConeOuterGainHandle(SliderHandle):
    """Slider for the player's cone_outer_gain (0..1)."""
    tip = 'cone_outer_gain'
    def __init__(self, player):
        super(ConeOuterGainHandle, self).__init__(player, 1, 0)
    def get_value(self):
        return self.player.cone_outer_gain
    def set_value(self, value):
        self.player.cone_outer_gain = value
class SoundSpaceWindow(pyglet.window.Window):
    """Top-down editor window for positional audio.

    Draws a grid background plus draggable handles for the listener and
    every player, and routes mouse events to whichever handle is hit.
    World X maps to screen x and world Z to screen y; self.zoom is in
    pixels per world unit and (tx, ty) is the pan offset in pixels.
    """
    def __init__(self, **kwargs):
        kwargs.update(dict(
            caption='Sound Space',
            resizable=True,
        ))
        super(SoundSpaceWindow, self).__init__(**kwargs)
        self.players = []
        self.handles = []
        self.more_handles = []
        # The listener gets position/orientation/volume/label handles too.
        listener = pyglet.media.get_audio_driver().get_listener()
        self.handles.append(PositionHandle(listener))
        self.handles.append(ForwardOrientationHandle(listener))
        self.handles.append(ListenerVolumeHandle(listener))
        self.handles.append(LabelHandle(listener))
        self.tip = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
            anchor_y='top', anchor_x='center')
        self.tip_player = None
        # pixels per unit
        self.zoom = 40
        self.tx = self.width/2
        self.ty = self.height/2
    def add_player(self, player):
        """Register a player and create its standard set of handles."""
        self.players.append(player)
        self.handles.append(PositionHandle(player))
        self.handles.append(ConeOrientationHandle(player))
        self.handles.append(ConeInnerAngleHandle(player))
        self.handles.append(ConeOuterAngleHandle(player))
        self.handles.append(LabelHandle(player))
        self.handles.append(MoreHandle(player))
    def set_more_player_handles(self, player):
        """Show the extra slider handles for `player`, or hide them all
        when `player` is None."""
        if player:
            self.more_handles = [
                VolumeHandle(player),
                MinDistanceHandle(player),
                MaxDistanceHandle(player),
                ConeOuterGainHandle(player),
            ]
        else:
            self.more_handles = []
    def draw_background(self):
        """Draw the grid, lines spaced self.zoom pixels (1 world unit)."""
        glLoadIdentity()
        glPushAttrib(GL_CURRENT_BIT)
        glColor3f(1, 1, 1)
        glBegin(GL_LINES)
        for i in range(0, self.width, self.zoom):
            glVertex2f(i, 0)
            glVertex2f(i, self.height)
        for i in range(0, self.height, self.zoom):
            glVertex2f(0, i)
            glVertex2f(self.width, i)
        glEnd()
        glPopAttrib()
    def camera_transform(self):
        """Apply the pan/zoom modelview for drawing in world space."""
        glLoadIdentity()
        glTranslatef(self.tx, self.ty, 0)
        glScalef(self.zoom, self.zoom, 1)
    def mouse_transform(self, x, y):
        """Window pixels -> world (x, 0, z)."""
        return (float(x - self.tx) / self.zoom,
                0,
                float(y - self.ty) / self.zoom)
    def player_transform(self, player):
        """World player position -> window pixels (x, y)."""
        return (player.position[0] * self.zoom + self.tx,
                player.position[2] * self.zoom + self.ty)
    def hit_test(self, mouse_x, mouse_y):
        """Return (handle, grab_offset) for the topmost handle under the
        mouse, or (None, None).  Extra (panel) handles take priority."""
        x, y, z = self.mouse_transform(mouse_x, mouse_y)
        for handle in self.more_handles[::-1] + self.handles[::-1]:
            offset = handle.hit_test(x, y, z)
            if offset:
                return handle, offset
        return None, None
    def on_draw(self):
        glClearColor(.8, .8, .8, 1)
        self.clear()
        self.draw_background()
        glPushMatrix()
        self.camera_transform()
        for handle in self.handles + self.more_handles:
            handle.draw()
        glPopMatrix()
        if self.tip_player:
            # Tooltip in window coordinates, just under the player.
            player_pos = self.player_transform(self.tip_player)
            self.tip.x = player_pos[0]
            self.tip.y = player_pos[1] - 15
            self.tip.draw()
    def on_mouse_scroll(self, x, y, dx, dy):
        # Zoom with the wheel, clamped to a sane range.
        self.zoom += dy * 10
        self.zoom = min(max(self.zoom, 10), 100)
    def on_mouse_press(self, x, y, button, modifiers):
        handle, offset = self.hit_test(x, y)
        if handle:
            self.push_handlers(handle.begin_drag(self, offset))
        else:
            # Empty space: dragging pans the view.
            self.push_handlers(PanView(self))
    def on_mouse_motion(self, x, y, dx, dy):
        """Update the tooltip for the handle under the cursor.

        The original also computed player_transform(handle.player) into
        an unused local; that dead statement has been removed.
        """
        handle, offset = self.hit_test(x, y)
        if handle:
            self.tip.text = handle.tip
            self.tip_player = handle.player
        else:
            self.tip.text = ''
class PanView(object):
    """Temporary mouse handler that pans the view while dragging over
    empty space; it detaches itself on the next press or release."""
    def __init__(self, window):
        self.win = window
    def on_mouse_release(self, x, y, button, modifiers):
        self.win.remove_handlers(self)
    def on_mouse_press(self, x, y, button, modifiers):
        self.win.remove_handlers(self)
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        target = self.win
        target.tx += dx
        target.ty += dy
if __name__ == '__main__':
    # We swap Y and Z, moving to left-handed system
    listener = pyglet.media.get_audio_driver().get_listener()
    listener.up_orientation = (0, -1, 0)
    # Start facing up (er, forwards)
    listener.forward_orientation = (0, 0, 1)
    listener.label = 'Listener'
    w = SoundSpaceWindow()
    # Populate the window's players from the space description file.
    r = reader.SpaceReader(w)
    r.read(pyglet.resource.file('space.txt'))
    # Start all players together via a single group.
    player_group = pyglet.media.PlayerGroup(w.players)
    player_group.play()
    pyglet.app.run()
| Python |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import os
import math
from pyglet import media
class PlayerReader(object):
    """Parses the indented attribute lines of one 'loop' block and
    applies them to a media player."""
    def __init__(self, player):
        self.player = player
    def line(self, line, lineno):
        """Apply one attribute line to the player.

        :param line: raw text, e.g. 'position 1 0 2'
        :param lineno: 1-based line number, used in error messages

        NOTE(review): ReaderException is not defined in this module —
        confirm it exists elsewhere, otherwise these raises will
        themselves raise NameError.
        """
        parts = line.split()
        # Single consistent elif chain.  The original used a stray `if`
        # for cone_orientation, which only worked because the keywords
        # are mutually exclusive; behaviour is unchanged.
        if parts[0] == 'position':
            if len(parts) < 4:
                raise ReaderException('Invalid position line %d' % lineno)
            self.player.position = tuple([float(x) for x in parts[1:]])
        elif parts[0] == 'cone_orientation':
            if len(parts) < 4:
                raise ReaderException('Invalid orientation line %d' % lineno)
            self.player.cone_orientation = tuple([float(x) for x in parts[1:]])
        elif parts[0] == 'outer_cone_angle':
            if len(parts) < 2:
                raise ReaderException('Invalid angle line %d' % lineno)
            self.player.cone_outer_angle = float(parts[1])
        elif parts[0] == 'inner_cone_angle':
            if len(parts) < 2:
                raise ReaderException('Invalid angle line %d' % lineno)
            self.player.cone_inner_angle = float(parts[1])
        elif parts[0] == 'label':
            if len(parts) < 2:
                raise ReaderException('Invalid label line %d' % lineno)
            # Note: only the first word becomes the label.
            self.player.label = parts[1]
class SpaceReader(object):
    """Reads a sound-space description and populates a space object.

    Format: top-level 'loop <filename>' lines each create a looping
    player; indented lines that follow set attributes on that player
    (handled by PlayerReader).  Blank lines and '#' comments are skipped.
    """
    def __init__(self, space):
        self.basedir = ''
        self.space = space
    def read(self, file):
        """Parse from a filename or an open file object.

        When given a filename, the file is opened here and is now closed
        again when parsing finishes (the original leaked the handle).
        basedir is remembered so media sources resolve relative to the
        space file.

        NOTE(review): ReaderException is not defined in this module —
        confirm it exists elsewhere.
        """
        opened_here = False
        if not hasattr(file, 'read'):
            self.basedir = os.path.dirname(file)
            file = open(file, 'rt')
            opened_here = True
        elif hasattr(file, 'name'):
            self.basedir = os.path.dirname(file.name)
        try:
            reader = None
            lineno = 0
            for line in file:
                lineno += 1
                if not line.strip() or line.startswith('#'):
                    continue
                # NOTE(review): indentation is detected by a single
                # leading space; tab-indented blocks are not recognised.
                if line.startswith(' '):
                    if not reader:
                        raise ReaderException(
                            'Unexpected indented block line %d' % lineno)
                    reader.line(line, lineno)
                else:
                    reader = None
                    parts = line.split()
                    if parts[0] == 'loop':
                        if len(parts) < 2:
                            raise ReaderException(
                                'No loop filename line %d' % lineno)
                        player = media.Player()
                        player.eos_action = 'loop'
                        player.queue(self.source(parts[1], streaming=False))
                        self.space.add_player(player)
                        reader = PlayerReader(player)
        finally:
            # Only close what we opened; caller-supplied files stay open.
            if opened_here:
                file.close()
    def source(self, filename, **kwargs):
        """Load a media source relative to the space file's directory."""
        filename = os.path.join(self.basedir, filename)
        return media.load(filename, **kwargs)
| Python |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import math
import os
import pyglet
from pyglet.gl import *
import reader
# Make the bundled resources ('space.txt', sound files) discoverable.
pyglet.resource.path.append('res')
pyglet.resource.reindex()
# Check for AVbin: the compressed audio this example plays needs the AVbin
# decoder, so fail early with a pointer to the download page.
try:
    from pyglet.media import avbin
except ImportError:
    raise ImportError('AVbin is required for this example, see '
                      'http://code.google.com/p/avbin')
def disc(r, x, y, slices=20, start=0, end=2*math.pi):
    """Draw a filled disc (or pie wedge) of radius *r* centred at (x, y).

    The arc from *start* to *end* (radians) is sampled at *slices* rim
    vertices and rendered as a GL triangle fan anchored at the centre.
    """
    step = (end - start) / (slices - 1)
    verts = [(x, y)]
    for i in range(slices):
        angle = start + i * step
        verts.append((x + r * math.cos(angle), y + r * math.sin(angle)))
    arr = ((GLfloat * 2) * len(verts))(*verts)
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
    glEnableClientState(GL_VERTEX_ARRAY)
    glVertexPointer(2, GL_FLOAT, 0, arr)
    glDrawArrays(GL_TRIANGLE_FAN, 0, len(verts))
    glPopClientAttrib()
def circle(r, x, y, slices=20):
    """Draw an unfilled circle of radius *r* centred at (x, y) as a GL
    line loop with *slices* segments."""
    step = 2 * math.pi / slices
    verts = []
    for i in range(slices):
        angle = i * step
        verts.append((x + r * math.cos(angle), y + r * math.sin(angle)))
    arr = ((GLfloat * 2) * len(verts))(*verts)
    glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
    glEnableClientState(GL_VERTEX_ARRAY)
    glVertexPointer(2, GL_FLOAT, 0, arr)
    glDrawArrays(GL_LINE_LOOP, 0, len(verts))
    glPopClientAttrib()
def orientation_angle(orientation):
    """Return the XZ-plane heading, in radians, of a 3-component
    orientation vector (the Y component is ignored)."""
    dx = orientation[0]
    dz = orientation[2]
    return math.atan2(dz, dx)
class Handle(object):
    """Base class for draggable editor widgets attached to a player.

    Subclasses provide ``pos()`` (world position) and ``radius`` (hit-test
    size); once dragged, the handle installs itself as a window event
    handler until a press/release detaches it.
    """

    tip = ''  # tooltip text shown while hovering this handle

    def __init__(self, player):
        self.player = player

    def hit_test(self, x, y, z):
        """Return the grab offset if (x, y, z) falls within ``radius`` of
        this handle's position, else None."""
        px, py, pz = self.pos()
        dx = px - x
        dy = py - y
        dz = pz - z
        if dx * dx + dy * dy + dz * dz < self.radius * self.radius:
            return -dx, -dy, -dz
        return None

    def draw(self):
        pass

    def begin_drag(self, window, offset):
        self.win = window
        self.offset = offset
        return self

    def on_mouse_press(self, x, y, button, modifiers):
        self.win.remove_handlers(self)

    def on_mouse_release(self, x, y, button, modifiers):
        self.win.remove_handlers(self)
class LabelHandle(Handle):
    """Non-interactive handle that draws the player's ``label`` text."""
    def __init__(self, player):
        super(LabelHandle, self).__init__(player)
        self.text = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
            anchor_y='top', anchor_x='center')
    def hit_test(self, x, y, z):
        # Labels are never clickable.
        return None
    def draw(self):
        if hasattr(self.player, 'label'):
            x, _, y = self.player.position
            # The camera scales world units to pixels; read the scale back
            # from the modelview matrix and undo it so text renders 1:1.
            mat = (GLfloat * 16)()
            glGetFloatv(GL_MODELVIEW_MATRIX, mat)
            glPushMatrix()
            glTranslatef(x, y, 0)
            glScalef(1/mat[0], 1/mat[5], 1/mat[10])
            glTranslatef(0, -5, 0)
            self.text.text = self.player.label
            self.text.draw()
            glPopMatrix()
class PositionHandle(Handle):
    """Red triangle marking, and dragging, the player's position."""
    tip = 'position'
    radius = .3
    def draw(self):
        glPushMatrix()
        glTranslatef(self.player.position[0], self.player.position[2], 0)
        glColor3f(1, 0, 0)
        # Equilateral triangle inscribed in the hit-test circle.
        glBegin(GL_TRIANGLES)
        glVertex2f(0, self.radius)
        glVertex2f(-self.radius * math.sqrt(3) / 2, -.5 * self.radius)
        glVertex2f(self.radius * math.sqrt(3) / 2, -.5 * self.radius)
        glEnd()
        glPopMatrix()
    def pos(self):
        return self.player.position
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Subtract the grab offset so the handle doesn't jump to the cursor.
        pos = self.win.mouse_transform(x, y)
        self.player.position = \
            (pos[0] - self.offset[0],
             pos[1] - self.offset[1],
             pos[2] - self.offset[2])
class OrientationHandle(Handle):
    """Yellow dot, on a dashed leader line, that drags an orientation
    vector.  Subclasses supply get_orientation()/set_orientation()."""
    radius = .1
    length = 1.5    # distance of the dot from the player, in world units
    def pos(self):
        x, _, z = self.player.position
        dir = self.get_orientation()
        # ``or 1`` substitutes 1 for a zero-length vector, so the
        # ``sz != 0`` guard below can never be false (kept as written).
        sz = math.sqrt(dir[0] ** 2 + dir[1] ** 2 + dir[2] ** 2) or 1
        if sz != 0:
            x += dir[0] / sz * self.length
            z += dir[2] / sz * self.length
        return x, 0, z
    def draw(self):
        glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
        px, _, py = self.player.position
        x, _, y = self.pos()
        # Dashed leader line from the player to the handle.
        glColor3f(.3, .3, .3)
        glEnable(GL_LINE_STIPPLE)
        glLineStipple(1, 0x7777)
        glBegin(GL_LINES)
        glVertex2f(px, py)
        glVertex2f(x, y)
        glEnd()
        # The handle itself (orientation dot).
        glColor3f(1, 1, 0)
        disc(self.radius, x, y)
        glPopAttrib()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # New orientation = vector from the player to the (offset-corrected)
        # cursor position in world space.
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        self.set_orientation(
            (hx - self.offset[0] - px,
             hy - self.offset[1] - py,
             hz - self.offset[2] - pz))
class ConeOrientationHandle(OrientationHandle):
    """Drags the direction of the player's sound cone."""
    tip = 'cone_orientation'
    def get_orientation(self):
        return self.player.cone_orientation
    def set_orientation(self, orientation):
        self.player.cone_orientation = orientation
class ForwardOrientationHandle(OrientationHandle):
    """Drags the forward (facing) orientation, used for the listener."""
    tip = 'forward_orientation'
    def get_orientation(self):
        return self.player.forward_orientation
    def set_orientation(self, orientation):
        self.player.forward_orientation = orientation
class ConeAngleHandle(Handle):
    """Base for handles that edit a sound-cone angle (in degrees).

    Subclasses provide ``length``, ``color``, ``fill_color`` and the
    get_angle()/set_angle() accessors.
    """
    radius = .1
    def pos(self):
        # Thumb sits on the cone edge: half the cone angle away from the
        # cone direction, ``length`` units out from the player.
        px, py, pz = self.player.position
        angle = orientation_angle(self.player.cone_orientation)
        angle += self.get_angle() * math.pi / 180. / 2
        x = math.cos(angle) * self.length
        z = math.sin(angle) * self.length
        return px + x, py, pz + z
    def draw(self):
        glPushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT)
        # Translucent pie wedge spanning the cone angle.
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glColor4f(*self.fill_color)
        px, _, py = self.player.position
        angle = orientation_angle(self.player.cone_orientation)
        a = self.get_angle() * math.pi / 180.
        disc(self.length, px, py,
            start=angle - a/2,
            end=angle + a/2)
        # Draggable thumb on the wedge edge.
        x, _, y = self.pos()
        glColor4f(*self.color)
        disc(self.radius, x, y)
        glPopAttrib()
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # New angle is twice the bearing between cone direction and cursor,
        # clamped to [0, 2*pi] and converted back to degrees.
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        angle = orientation_angle(self.player.cone_orientation)
        hangle = orientation_angle((hx - px, hy - py, hz - pz))
        if hangle < angle:
            hangle += math.pi * 2
        res = min(max((hangle - angle) * 2, 0), math.pi * 2)
        self.set_angle(res * 180. / math.pi)
class ConeInnerAngleHandle(ConeAngleHandle):
    """Edits the cone's inner (full-volume) angle; drawn in green."""
    tip = 'cone_inner_angle'
    length = 1.
    color = (.2, .8, .2, 1)
    fill_color = (0, 1, 0, .1)
    def get_angle(self):
        return self.player.cone_inner_angle
    def set_angle(self, angle):
        self.player.cone_inner_angle = angle
class ConeOuterAngleHandle(ConeAngleHandle):
    """Edits the cone's outer angle; drawn in blue, slightly larger
    radius so it does not overlap the inner-angle wedge."""
    tip = 'cone_outer_angle'
    length = 1.2
    color = (.2, .2, .8, 1)
    fill_color = (0, 0, 1, .1)
    def get_angle(self):
        return self.player.cone_outer_angle
    def set_angle(self, angle):
        self.player.cone_outer_angle = angle
class MoreHandle(Handle):
    """Expandable '+' button that reveals extra sliders for a player.

    While open, it draws a white panel and installs the extra sliders via
    the window's set_more_player_handles(); a click outside those sliders
    closes the panel again.
    """
    tip = 'More...'
    radius = .2
    open = False        # whether the panel is currently expanded
    open_width = 1.5    # panel size in world units
    open_height = 1.5
    def pos(self):
        # Anchored one unit to the side of the player's position.
        x, y, z = self.player.position
        return x + 1, y, z + 1
    def draw(self):
        x, _, z = self.pos()
        if self.open:
            # Open: translucent white panel with a black outline.
            x -= .2
            z += .2
            glPushAttrib(GL_ENABLE_BIT)
            glEnable(GL_BLEND)
            glColor4f(1, 1, 1, .8)
            glBegin(GL_QUADS)
            glVertex2f(x, z)
            glVertex2f(x + self.open_width, z)
            glVertex2f(x + self.open_width, z - self.open_height)
            glVertex2f(x, z - self.open_height)
            glEnd()
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            glColor4f(0, 0, 0, 1)
            glBegin(GL_QUADS)
            glVertex2f(x, z)
            glVertex2f(x + self.open_width, z)
            glVertex2f(x + self.open_width, z - self.open_height)
            glVertex2f(x, z - self.open_height)
            glEnd()
            glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
            glPopAttrib()
        else:
            # Closed: white disc, black outline, '+' cross.
            glColor3f(1, 1, 1)
            disc(self.radius, x, z)
            glColor3f(0, 0, 0)
            circle(self.radius, x, z)
            r = self.radius - 0.1
            glBegin(GL_LINES)
            glVertex2f(x - r, z)
            glVertex2f(x + r, z)
            glVertex2f(x, z - r)
            glVertex2f(x, z + r)
            glEnd()
    def begin_drag(self, window, offset):
        # Clicking opens the panel rather than starting a drag.
        self.open = True
        self.win = window
        self.win.set_more_player_handles(self.player)
        return self
    def on_mouse_press(self, x, y, button, modifiers):
        # A click on one of the revealed sliders keeps the panel open; any
        # other click closes it and detaches this handler.
        x, y, z = self.win.mouse_transform(x, y)
        for handle in self.win.more_handles:
            if handle.hit_test(x, y, z):
                return
        self.win.set_more_player_handles(None)
        self.win.remove_handlers(self)
        self.open = False
    def on_mouse_release(self, x, y, button, modifiers):
        pass
class SliderHandle(Handle):
    """Horizontal slider editing a normalised [0, 1] value.

    Drawn at offset (x, z) from the player; subclasses map the thumb value
    to the underlying property via get_value()/set_value().
    """
    length = 1.
    width = .05
    radius = .1
    def __init__(self, player, x, z):
        super(SliderHandle, self).__init__(player)
        self.x = x
        self.z = z
    def pos(self):
        # Thumb position: groove origin plus value along the groove.
        x, y, z = self.player.position
        x += self.x + self.get_value() * self.length
        z += self.z
        return x, y, z
    def draw(self):
        x = self.x + self.player.position[0]
        z = self.z + self.player.position[2]
        # Groove
        glColor3f(.5, .5, .5)
        glBegin(GL_QUADS)
        glVertex2f(x, z - self.width/2)
        glVertex2f(x + self.length, z - self.width/2)
        glVertex2f(x + self.length, z + self.width/2)
        glVertex2f(x, z + self.width/2)
        glEnd()
        # Thumb
        x, _, z = self.pos()
        glColor3f(.2, .2, .2)
        disc(self.radius, x, z)
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        # Project the cursor onto the groove and clamp to [0, 1].
        px, py, pz = self.player.position
        hx, hy, hz = self.win.mouse_transform(x, y)
        value = float(hx - px - self.x) / self.length
        value = min(max(value, 0), 1)
        self.set_value(value)
class VolumeHandle(SliderHandle):
    """Volume slider shown in a player's 'More...' panel."""
    tip = 'volume'
    def __init__(self, player):
        super(VolumeHandle, self).__init__(player, 1, .9)
    def get_value(self):
        return self.player.volume
    def set_value(self, value):
        self.player.volume = value
class ListenerVolumeHandle(SliderHandle):
    """Always-visible volume slider attached to the listener."""
    tip = 'volume'
    def __init__(self, player):
        super(ListenerVolumeHandle, self).__init__(player, -.5, -1)
    def get_value(self):
        return self.player.volume
    def set_value(self, value):
        self.player.volume = value
class MinDistanceHandle(SliderHandle):
    """Slider mapping [0, 1] thumb position to min_distance in [0, 5]."""
    tip = 'min_distance'
    def __init__(self, player):
        super(MinDistanceHandle, self).__init__(player, 1, .6)
    def get_value(self):
        return self.player.min_distance / 5.
    def set_value(self, value):
        self.player.min_distance = value * 5.
class MaxDistanceHandle(SliderHandle):
    """Slider mapping [0, 1] thumb position to max_distance in [0, 5];
    the displayed value is clamped so larger distances still render."""
    tip = 'max_distance'
    def __init__(self, player):
        super(MaxDistanceHandle, self).__init__(player, 1, .3)
    def get_value(self):
        return min(self.player.max_distance / 5., 1.0)
    def set_value(self, value):
        self.player.max_distance = value * 5.
class ConeOuterGainHandle(SliderHandle):
    """Slider for the gain applied outside the cone's outer angle."""
    tip = 'cone_outer_gain'
    def __init__(self, player):
        super(ConeOuterGainHandle, self).__init__(player, 1, 0)
    def get_value(self):
        return self.player.cone_outer_gain
    def set_value(self, value):
        self.player.cone_outer_gain = value
class SoundSpaceWindow(pyglet.window.Window):
    """Top-down 2D editor view of the 3D audio scene.

    World X maps to screen X and world Z to screen Y; handles are drawn in
    world units under a pan/zoom camera transform.
    """
    def __init__(self, **kwargs):
        kwargs.update(dict(
            caption='Sound Space',
            resizable=True,
        ))
        super(SoundSpaceWindow, self).__init__(**kwargs)
        self.players = []       # media players placed in the scene
        self.handles = []       # always-visible draggable widgets
        self.more_handles = []  # extra sliders revealed by a MoreHandle
        # The listener is edited with the same handle machinery as players.
        listener = pyglet.media.get_audio_driver().get_listener()
        self.handles.append(PositionHandle(listener))
        self.handles.append(ForwardOrientationHandle(listener))
        self.handles.append(ListenerVolumeHandle(listener))
        self.handles.append(LabelHandle(listener))
        # Tooltip label shown near the hovered handle's player.
        self.tip = pyglet.text.Label('', font_size=10, color=(0, 0, 0, 255),
            anchor_y='top', anchor_x='center')
        self.tip_player = None
        # pixels per world unit
        self.zoom = 40
        self.tx = self.width/2   # camera pan offset, in pixels
        self.ty = self.height/2
    def add_player(self, player):
        """Register *player* and create its standard set of handles."""
        self.players.append(player)
        self.handles.append(PositionHandle(player))
        self.handles.append(ConeOrientationHandle(player))
        self.handles.append(ConeInnerAngleHandle(player))
        self.handles.append(ConeOuterAngleHandle(player))
        self.handles.append(LabelHandle(player))
        self.handles.append(MoreHandle(player))
    def set_more_player_handles(self, player):
        """Show the extra sliders for *player*, or hide them if None."""
        if player:
            self.more_handles = [
                VolumeHandle(player),
                MinDistanceHandle(player),
                MaxDistanceHandle(player),
                ConeOuterGainHandle(player),
            ]
        else:
            self.more_handles = []
    def draw_background(self):
        """Draw the grid, one line per zoom step, in screen space."""
        glLoadIdentity()
        glPushAttrib(GL_CURRENT_BIT)
        glColor3f(1, 1, 1)
        glBegin(GL_LINES)
        for i in range(0, self.width, self.zoom):
            glVertex2f(i, 0)
            glVertex2f(i, self.height)
        for i in range(0, self.height, self.zoom):
            glVertex2f(0, i)
            glVertex2f(self.width, i)
        glEnd()
        glPopAttrib()
    def camera_transform(self):
        glLoadIdentity()
        glTranslatef(self.tx, self.ty, 0)
        glScalef(self.zoom, self.zoom, 1)
    def mouse_transform(self, x, y):
        """Window pixels -> world (x, 0, z); the inverse of camera_transform."""
        return (float(x - self.tx) / self.zoom,
            0,
            float(y - self.ty) / self.zoom)
    def player_transform(self, player):
        """World position of *player* -> window pixel coordinates."""
        return (player.position[0] * self.zoom + self.tx,
            player.position[2] * self.zoom + self.ty)
    def hit_test(self, mouse_x, mouse_y):
        """Return (handle, grab offset) under the cursor, or (None, None).
        Reversed order gives priority to the most recently drawn handle."""
        x, y, z = self.mouse_transform(mouse_x, mouse_y)
        for handle in self.more_handles[::-1] + self.handles[::-1]:
            offset = handle.hit_test(x, y, z)
            if offset:
                return handle, offset
        return None, None
    def on_draw(self):
        glClearColor(.8, .8, .8, 1)
        self.clear()
        self.draw_background()
        glPushMatrix()
        self.camera_transform()
        for handle in self.handles + self.more_handles:
            handle.draw()
        glPopMatrix()
        # Tooltip is drawn in screen space, below the hovered player.
        if self.tip_player:
            player_pos = self.player_transform(self.tip_player)
            self.tip.x = player_pos[0]
            self.tip.y = player_pos[1] - 15
            self.tip.draw()
    def on_mouse_scroll(self, x, y, dx, dy):
        # Zoom in steps of 10 px/unit, clamped to [10, 100].
        self.zoom += dy * 10
        self.zoom = min(max(self.zoom, 10), 100)
    def on_mouse_press(self, x, y, button, modifiers):
        # Click on a handle starts a drag; empty space starts a camera pan.
        handle, offset = self.hit_test(x, y)
        if handle:
            self.push_handlers(handle.begin_drag(self, offset))
        else:
            self.push_handlers(PanView(self))
    def on_mouse_motion(self, x, y, dx, dy):
        handle, offset = self.hit_test(x, y)
        if handle:
            self.tip.text = handle.tip
            # NOTE(review): ``pos`` is computed but unused; positioning of
            # the tooltip happens in on_draw via tip_player.
            pos = self.player_transform(handle.player)
            self.tip_player = handle.player
        else:
            self.tip.text = ''
class PanView(object):
    """Temporary event handler that pans the camera while the mouse drags
    over empty space; any press or release detaches it."""

    def __init__(self, window):
        self.win = window

    def _detach(self):
        self.win.remove_handlers(self)

    def on_mouse_release(self, x, y, button, modifiers):
        self._detach()

    def on_mouse_press(self, x, y, button, modifiers):
        self._detach()

    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        self.win.tx += dx
        self.win.ty += dy
if __name__ == '__main__':
    # We swap Y and Z, moving to a left-handed system, so that the editor's
    # 2D plane (X across, Z up the screen) matches the audio space.
    listener = pyglet.media.get_audio_driver().get_listener()
    listener.up_orientation = (0, -1, 0)
    # Start facing up the screen (er, forwards).
    listener.forward_orientation = (0, 0, 1)
    listener.label = 'Listener'
    w = SoundSpaceWindow()
    # Populate the scene from the bundled space.txt description.
    r = reader.SpaceReader(w)
    r.read(pyglet.resource.file('space.txt'))
    player_group = pyglet.media.PlayerGroup(w.players)
    player_group.play()
    pyglet.app.run()
| Python |
#!/usr/bin/python
# $Id:$
# List the attached tablet devices; the user opens one by typing its number
# (Python 2 example).
import pyglet
window = pyglet.window.Window()
tablets = pyglet.input.get_tablets()
canvases = []  # opened tablet canvases, kept here so they stay alive
if tablets:
    print 'Tablets:'
    for i, tablet in enumerate(tablets):
        print ' (%d) %s' % (i + 1, tablet.name)
    print 'Press number key to open corresponding tablet device.'
else:
    print 'No tablets found.'
@window.event
def on_text(text):
try:
index = int(text) - 1
except ValueError:
return
if not (0 <= index < len(tablets)):
return
name = tablets[i].name
try:
canvas = tablets[i].open(window)
except pyglet.input.DeviceException:
print 'Failed to open tablet %d on window' % index
print 'Opened %s' % name
@canvas.event
def on_enter(cursor):
print '%s: on_enter(%r)' % (name, cursor)
@canvas.event
def on_leave(cursor):
print '%s: on_leave(%r)' % (name, cursor)
@canvas.event
def on_motion(cursor, x, y, pressure):
print '%s: on_motion(%r, %r, %r, %r)' % (name, cursor, x, y, pressure)
@window.event
def on_mouse_press(x, y, button, modifiers):
print 'on_mouse_press(%r, %r, %r, %r' % (x, y, button, modifiers)
@window.event
def on_mouse_release(x, y, button, modifiers):
print 'on_mouse_release(%r, %r, %r, %r' % (x, y, button, modifiers)
pyglet.app.run()
| Python |
#!/usr/bin/env python
'''Demonstrates basic use of IncrementalTextLayout and Caret.
A simple widget-like system is created in this example supporting keyboard and
mouse focus.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import pyglet
class Rectangle(object):
    '''A solid light-grey quad added to a graphics batch.'''

    def __init__(self, x1, y1, x2, y2, batch):
        corners = [x1, y1, x2, y1, x2, y2, x1, y2]
        fill = [200, 200, 220, 255] * 4
        self.vertex_list = batch.add(4, pyglet.gl.GL_QUADS, None,
                                     ('v2i', corners),
                                     ('c4B', fill))
class TextWidget(object):
    """An editable single-line text field rendered into a shared batch,
    with a Rectangle outline drawn behind it."""

    def __init__(self, text, x, y, width, batch):
        self.document = pyglet.text.document.UnformattedDocument(text)
        self.document.set_style(0, len(self.document.text),
                                dict(color=(0, 0, 0, 255)))
        metrics = self.document.get_font()
        line_height = metrics.ascent - metrics.descent
        self.layout = pyglet.text.layout.IncrementalTextLayout(
            self.document, width, line_height, multiline=False, batch=batch)
        self.caret = pyglet.text.caret.Caret(self.layout)
        self.layout.x = x
        self.layout.y = y
        # Border drawn slightly larger than the editable area.
        padding = 2
        self.rectangle = Rectangle(x - padding, y - padding,
                                   x + width + padding,
                                   y + line_height + padding, batch)

    def hit_test(self, x, y):
        """True if window point (x, y) lies strictly inside the layout."""
        inside_x = 0 < x - self.layout.x < self.layout.width
        inside_y = 0 < y - self.layout.y < self.layout.height
        return inside_x and inside_y
class Window(pyglet.window.Window):
    """A small form window with three labelled text-entry widgets.

    Keyboard focus is tracked manually in ``self.focus``; text and caret
    events are forwarded to the focused widget's caret.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): *args/**kwargs (e.g. resizable=True at the call
        # site) are accepted but not forwarded to the superclass.
        super(Window, self).__init__(400, 140, caption='Text entry')
        self.batch = pyglet.graphics.Batch()
        self.labels = [
            pyglet.text.Label('Name', x=10, y=100, anchor_y='bottom',
                color=(0, 0, 0, 255), batch=self.batch),
            pyglet.text.Label('Species', x=10, y=60, anchor_y='bottom',
                color=(0, 0, 0, 255), batch=self.batch),
            pyglet.text.Label('Special abilities', x=10, y=20,
                anchor_y='bottom', color=(0, 0, 0, 255),
                batch=self.batch)
        ]
        self.widgets = [
            TextWidget('', 200, 100, self.width - 210, self.batch),
            TextWidget('', 200, 60, self.width - 210, self.batch),
            TextWidget('', 200, 20, self.width - 210, self.batch)
        ]
        self.text_cursor = self.get_system_mouse_cursor('text')
        self.focus = None
        self.set_focus(self.widgets[0])
    def on_resize(self, width, height):
        super(Window, self).on_resize(width, height)
        for widget in self.widgets:
            # NOTE(review): this sets a plain attribute on TextWidget; the
            # layout's width is not updated -- confirm whether
            # ``widget.layout.width`` was intended here.
            widget.width = width - 110
    def on_draw(self):
        pyglet.gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()
    def on_mouse_motion(self, x, y, dx, dy):
        # Show the I-beam cursor while hovering any text widget.
        for widget in self.widgets:
            if widget.hit_test(x, y):
                self.set_mouse_cursor(self.text_cursor)
                break
        else:
            self.set_mouse_cursor(None)
    def on_mouse_press(self, x, y, button, modifiers):
        # Clicking a widget focuses it; clicking elsewhere clears focus.
        for widget in self.widgets:
            if widget.hit_test(x, y):
                self.set_focus(widget)
                break
        else:
            self.set_focus(None)
        if self.focus:
            self.focus.caret.on_mouse_press(x, y, button, modifiers)
    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        if self.focus:
            self.focus.caret.on_mouse_drag(x, y, dx, dy, buttons, modifiers)
    def on_text(self, text):
        if self.focus:
            self.focus.caret.on_text(text)
    def on_text_motion(self, motion):
        if self.focus:
            self.focus.caret.on_text_motion(motion)
    def on_text_motion_select(self, motion):
        if self.focus:
            self.focus.caret.on_text_motion_select(motion)
    def on_key_press(self, symbol, modifiers):
        # Tab / shift-Tab cycles focus between widgets; Escape quits.
        if symbol == pyglet.window.key.TAB:
            if modifiers & pyglet.window.key.MOD_SHIFT:
                dir = -1
            else:
                dir = 1
            if self.focus in self.widgets:
                i = self.widgets.index(self.focus)
            else:
                i = 0
                dir = 0
            self.set_focus(self.widgets[(i + dir) % len(self.widgets)])
        elif symbol == pyglet.window.key.ESCAPE:
            pyglet.app.exit()
    def set_focus(self, focus):
        """Move keyboard focus: hide the old caret and select all text in
        the newly focused widget (caret at end, mark at start)."""
        if self.focus:
            self.focus.caret.visible = False
            self.focus.caret.mark = self.focus.caret.position = 0
        self.focus = focus
        if self.focus:
            self.focus.caret.visible = True
            self.focus.caret.mark = 0
            self.focus.caret.position = len(self.focus.document.text)
# Create the form window and hand control to pyglet's event loop.
window = Window(resizable=True)
pyglet.app.run()
| Python |
import pyglet
window = pyglet.window.Window() # 640x480 by default
# (Translated from Icelandic:) Getting the collision tests to work took
# absurdly long, which is why the program is quite ugly and it can be hard to
# see why some things are done as they are -- things were often changed until
# they simply worked, whether or not the reason was clear.  The
# geturFaridUpp/Nidur/Haegri/Vinstri ("canMoveUp/Down/Right/Left") approach is
# rather ugly because of the repetition, but unlike the two earlier attempts
# it works, so it is used.
class Bill:
    """A car ('bill') on the 6x6 sliding-block puzzle board.

    Each grid square is 80 px.  ``snuningur`` ('rotation') is 'h' for a
    horizontal car and 'n' for a vertical one; vertical sprites are drawn
    rotated 270 degrees about their anchor, which is why their x anchor is
    offset one square and width/height are swapped below.
    """
    def __init__(self, mynd, xGrid, yGrid, snuningur):
        self.mynd = mynd  # sprite image ('mynd' = picture)
        if snuningur == 'n':
            self.x = 80*(xGrid)
        else:
            self.x = 80*(xGrid-1)
        self.y = 80*(yGrid-1)
        self.sprite = pyglet.sprite.Sprite(self.mynd, self.x, self.y)
        self.snuningur = snuningur
        # 'valinn' = selected; so that only one car can be dragged at a time.
        self.valinn = False
    def draw(self):
        self.sprite.set_position(self.x, self.y)
        if self.snuningur == 'n':
            self.sprite.rotation = 270  # rotated by 270 degrees
        self.sprite.draw()
    def snapToGrid(self):
        # Snap to the nearest 80-px grid square (called when a drag ends).
        self.x = round(self.x/80)*80
        self.y = round(self.y/80)*80
    def breidd(self):
        # 'breidd' = width: effective on-board width (swapped if rotated).
        if self.snuningur == 'h':
            return self.sprite.width
        return self.sprite.height
    def haed(self):
        # 'haed' = height: effective on-board height (swapped if rotated).
        if self.snuningur == 'h':
            return self.sprite.height
        return self.sprite.width
    def snertir(self, x, y):
        # 'snertir' = touches: point-in-car test.  A rotated (vertical)
        # car's rectangle extends to the left of its x anchor.
        if self.snuningur == 'h':
            if self.x < x < self.x + self.sprite.width and self.y < y < self.y + self.sprite.height:
                return True
        else:
            if self.x - self.sprite.height < x < self.x and self.y < y < self.y + self.sprite.width:
                return True
        return False
    def geturFaridHaegri(self, uppteknirReitir, dx):
        # 'can move right': square just past the right edge must be free.
        yStadur = round(self.y/80)
        xStadur = (self.x+dx+self.breidd())//80
        if uppteknirReitir[yStadur][xStadur]:
            return False
        return True
    def geturFaridVinstri(self, uppteknirReitir, dx):
        # 'can move left': square at the new left edge must be free.
        yStadur = round(self.y/80)
        xStadur = (self.x+dx)//80
        if uppteknirReitir[yStadur][xStadur]:
            return False
        return True
    def geturFaridUpp(self, uppteknirReitir, dy):
        # 'can move up': square just past the top edge must be free.  The -1
        # compensates for the vertical car's rotated x anchor.
        xStadur = round(self.x/80)-1
        yStadur = (self.y+dy+self.haed())//80
        if uppteknirReitir[yStadur][xStadur]:
            return False
        return True
    def geturFaridNidur(self, uppteknirReitir, dy):
        # 'can move down': square at the new bottom edge must be free.
        xStadur = round(self.x/80)-1
        yStadur = (self.y+dy)//80
        if uppteknirReitir[yStadur][xStadur]:
            return False
        return True
    def move(self, dx, dy, uppteknirReitir):
        # Horizontal cars move only along x, vertical only along y; both are
        # kept within the 480-px board before checking square occupancy.
        if self.snuningur == 'h' and 0 < self.x+dx < 480 - self.sprite.width:
            maFaera = True  # 'maFaera' = may move
            if dx > 0 and not self.geturFaridHaegri(uppteknirReitir, dx):
                maFaera = False
            if dx < 0 and not self.geturFaridVinstri(uppteknirReitir, dx):
                maFaera = False
            if maFaera:
                self.x += dx
        elif self.snuningur == 'n' and 0 < self.y+dy < 480 - self.sprite.width:
            maFaera = True
            if dy > 0 and not self.geturFaridUpp(uppteknirReitir, dy):
                maFaera = False
            if dy < 0 and not self.geturFaridNidur(uppteknirReitir, dy):
                maFaera = False
            if maFaera:
                self.y += dy
grid = pyglet.resource.image('myndir/grid.png')
# (Translated:) The images could be loaded inside the class, but when the same
# car image is used more than once it pays to load each image only once here.
raudurBill = pyglet.resource.image('myndir/raudurBill.png')
blarBill = pyglet.resource.image('myndir/blarBill.png')
graennBill = pyglet.resource.image('myndir/graennBill.png')
# The board is 6x6, each square is 80 pixels, 160 pixels free on the left.
bilar = []  # the cars; each entry is a Bill(image, xGrid, yGrid, rotation)
bilar.append(Bill(raudurBill, 2, 4, 'h'))
bilar.append(Bill(blarBill, 1, 6, 'h'))
bilar.append(Bill(graennBill, 3, 1, 'h'))
bilar.append(Bill(blarBill, 5, 2, 'h'))
bilar.append(Bill(graennBill, 1, 3, 'n'))
bilar.append(Bill(blarBill, 1, 1, 'n'))
bilar.append(Bill(graennBill, 4, 3, 'n'))
bilar.append(Bill(graennBill, 6, 4, 'n'))
def uppteknirReitir(bilaListi):
    """Return a 6x6 boolean matrix: True where a board square is occupied.

    ('uppteknir reitir' = occupied squares.)  Squares under the currently
    selected car are treated as free so it can slide over its own cells.
    """
    reitir = []
    for x in range(0, 6):
        reitir.append([])
    for x in range(0, 6):
        for i in range(0, 6):
            reitir[x].append(False)
            for bill in bilaListi:
                # Probe the centre of square (column i, row x).
                if bill.snertir(i*80+40, x*80+40) and not bill.valinn:
                    reitir[x][i] = True
    return reitir
@window.event
def on_draw():
    window.clear()
    grid.blit(0,0)
    for bill in bilar:
        bill.draw()
@window.event
def on_mouse_release(x, y, button, modifiers):
    # Drop: snap every car back onto the grid and deselect it, then check
    # whether the red car (bilar[0]) has reached the exit square.
    # 'Sigur' = victory.
    for bill in bilar:
        bill.snapToGrid()
        bill.valinn = False
    if bilar[0].snertir(5*80+40, 3*80+40):
        print("Sigur")
@window.event
def on_mouse_press(x, y, button, modifiers):
    # Mark the car under the cursor as selected for dragging.
    for bill in bilar:
        if bill.snertir(x, y):
            bill.valinn = True
@window.event
def on_mouse_drag(x, y, dx, dy, button, modifiers):
    # Move the selected car by the drag delta, respecting occupied squares.
    # ('and bill.snertir(x, y)' could be added to change the drag behaviour.)
    for bill in bilar:
        if bill.valinn:
            bill.move(dx, dy, uppteknirReitir(bilar))
pyglet.app.run()
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/basic-som.py
# Basic self-organizing maps
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# A self-organizing map is a neural network that can be used to solve a number
# of problems including clustering and classification. The goal of this
# tutorial is to show how to use Peach to deal with self-organizing maps. We
# start by creating a small map with only five neurons, with two inputs each.
# To inspect the behaviour of the map, we set the weights to known values.
nn = p.SOM((5, 2))
nn.weights = array([
    [ 0.5, 0.0 ],
    [ -0.5, 0.0 ],
    [ 0.0, 0.0 ],
    [ 0.5, 0.5 ],
    [-0.5, 0.5 ] ], dtype=float)
# We want to see how the map behaves, so we feed the network one vector.
x = array([ 0.0, -0.5 ], dtype=float)
# The closest weight vector is [0, 0] (third row), so it is expected that the
# winning neuron is the one represented in the third line.
y = nn(x)
# Update the winning neuron.  The self-organizing map retains the information
# about the winner of the last activation, so passing the input vector again
# is all that is needed to compute the update.
nn.learn(x)
# Checking the results (Python 2 print statements):
print "The winning neuron was %d" % y
print "Its updated synaptic weights are:"
print nn.weights
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can be used to predict future values of a sequence of
# numbers. Wold's decomposition theorem establishes that any sequence can be
# split into a regular and predictable part plus an innovation process (which
# is discrete white noise, and thus unpredictable). The goal of this tutorial
# to show how to use the neural network implementation of Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# First, we create the network: a single layer with a single neuron with N
# inputs, one output and the identity as activation function.  This kind of
# neuron is usually known as ADALINE (Adaptive Linear Neuron, later Adaptive
# Linear Element).  It is trained with the LMS learning rule.
N = 32
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05))
# These lists track the values of the sequence being predicted and of the
# error, for plotting below.
xlog = [ ]
ylog = [ ]
elog = [ ]
error = 1.
i = 0
x = zeros((N, 1), dtype=float) # Input is a column-vector.
while i < 2000 and error > 1.e-10:
    # The sequence to predict is generated by a cosine.  The next value of
    # the function is the desired output; the neuron predicts it from the N
    # past values.  A little gaussian noise is added (it might actually help
    # the convergence).
    d = cos(2.*pi/128. * i) + random.gauss(0., 0.01)
    # Activate the network to compute the prediction.  The output must be
    # indexed as a matrix, since that is how the network returns it.
    y = nn(x)[0, 0]
    error = abs(d - y)
    nn.learn(x, d)
    # Store the results to plot later.
    xlog.append(d)
    ylog.append(y)
    elog.append(error)
    # Delay line: shift every value one position back and put the newest
    # sample in position [0].  This makes no assumption about N, so it works
    # with any number of prediction coefficients.
    x[1:] = x[:-1]
    x[0] = d
    i = i + 1
# If matplotlib is available, plot and save the convergence of the synaptic
# weights and of the error to the file ``linear-prediction.png``.
try:
    import pylab
    pylab.subplot(211)
    pylab.hold(True)
    pylab.grid(True)
    pylab.plot(array(xlog), 'b--')
    pylab.plot(array(ylog), 'g')
    pylab.plot(array(elog), 'r:')
    pylab.legend([ "$x$", "$y$", "$error$" ])
    pylab.subplot(212)
    pylab.grid(True)
    pylab.stem(arange(0, N), reshape(nn[0].weights, (N,)), "k-", "ko", "k-")
    pylab.xlim([0, N-1])
    pylab.savefig("linear-prediction.png")
except ImportError:
    # No matplotlib: just print the final weights (Python 2 prints).
    print "After %d iterations:" % (len(elog),)
    print nn[0].weights
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/xor-problem.py
# Solving the exclusive-or problem
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# First, we import the needed modules
from numpy import *
import peach as p
# The network for the exclusive-or problem has two input neurons, a hidden
# layer of two neurons and one output neuron.  The neurons are biased (the
# final True argument), the activation is sigmoidal (TanH) and the learning
# rule is backpropagation with rate 0.2.
nn = p.FeedForward((2, 2, 1), p.TanH, p.BackPropagation(0.2), True)
# The training set is a list of (input vector, desired output) pairs; -1/1 is
# used instead of 0/1 to match the TanH output range.
train_set = [ ( array(( -1., -1.)), -1. ),
              ( array(( -1., 1.)), 1. ),
              ( array(( 1., -1.)), 1. ),
              ( array(( 1., 1.)), -1. ) ]
# Present the whole training set to the network.
nn.train(train_set)
# Testing the results (Python 2 print statements):
print nn[0].weights
print nn[1].weights
for x, _ in train_set:
    print x, " => ", nn(x)
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/basic-neural-network.py
# Basic example of using neural networks
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# Creation of a three-layer neural network.  The first (input) layer receives
# the inputs and contains 2 neurons; the second layer is the first hidden
# layer with 2 neurons; the last (output) layer contains one neuron.  The
# activation function is `Sigmoid` and the learning rule `BackPropagation`.
nn = p.FeedForward((2, 2, 1), p.Sigmoid, p.BackPropagation)
# The `[]` operator selects a specific layer.  The input layer cannot be
# modified, so the `[0]`-th layer is the first hidden layer.  A layer's
# `weights` property is an array of the synaptic weights of that layer --
# each line is the weight vector of the corresponding neuron.
nn[0].weights = array([[ 0.5, 0.5 ],
    [ -0.5, -0.5 ]], dtype = float)
# Synaptic weights of the neuron on the last layer.  Since a FeedForward
# network is just a list of `Layers`, this layer could also be accessed
# as `[-1]`.
nn[1].weights = array([ 0.25, -0.25 ], dtype = float)
# One example that will be shown to the network for learning.
x = array([ 0.8, 0.2 ], dtype = float) # Input vector
d = 0.9 # Desired response
# Feed the network the input by calling it as a function; the return value
# is the network's output.
y = nn(x)
# Tell the network to learn the example: the BackPropagation rule adapts the
# synaptic weights of the network.
nn.feed(x, d)
# The code below shows the results (Python 2 print statements).
print "Peach tutorial on neural network basics"
print
print "Input to the network:"
print x
print "Network output:"
print y
print
print "Error: %7.4f" % (d - y,)
print
print "Updated weights in the first hidden layer:"
print nn[0].weights
print
print "Updated weights in the output layer:"
print nn[1].weights
print
print "Network output with updated weights:"
print nn(x)
print
print "Updated error: %7.4f" % (d - nn(x),)
print
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/radial-basis-function.py
# Radial basis functions for interpolation.
################################################################################
# Different kinds of neural network use different functions as activations for
# some neurons. The Radial Basis Network (RBFN) uses radial basis functions
# (RBFs), which are functions that are symmetric according to the origin. These
# functions offer good local approximation for a function and, for that reason,
# they are especially good for interpolation and function approximation. In
# fact, it can be proved that any continuous function can be approximated by
# this type of neural network.
#
# In this network, the first layer contains RBFs as activation functions.
# Because of this, the training is done in a different way, even though a
# gradient approach could be used. However, it is in general better to use a
# clustering algorithm, such as K-Means, to find the centers of the functions,
# and compute the width of the functions proportionally to the greatest distance
# among centers. The second layer of the network is a linear weighted combiner
# that sums the contribution of each neuron.
#
# In this tutorial, we will see how to use Peach's implementation of RBFNs to
# interpolate a function. We start, as always, by importing the necessary
# modules.
from numpy import *
import peach as p
from random import randrange
# Let's define the parameters of the simulation. We desire to interpolate a
# period of a cosine, from 20 samples of the function. The lines below create
# the training set.
x = linspace(-pi, pi, 20)
x = x.reshape((20, 1)) # column vector: one training example per row
y = cos(x)
# Next, we create the network and the corresponding algorithms. We will use a
# 7th-order network to make the interpolation. This must suffice, but it is
# expected that small errors occur, especially near the limits of the interval.
N = 7          # Order of the simulation (number of radial basis functions)
# We will use the K-Means algorithm to clusterize the centers. Any other such
# algorithm (such as SOMs or Fuzzy C-Means) could be used. We initialize the
# centers with a reasonable set of values, linearly distributed through the
# interval. Upon calling the K-Means, we receive the clustered centers as a
# result.
km = p.KMeans(x, N)
km.c = linspace(-pi, pi, N)
c = km()
# Here, we initialize the Radial Basis Network. Notice that we don't need to
# pass a lot of parameters to the network -- only the centers, here represented
# by ``c``, are mandatory. The default RBF is the gaussian, and the default
# combiner is Linear, but we could specify different functions using ``phi`` and
# ``phi2`` arguments, respectively. The learning procedure for the second layer
# is the backpropagation (to take care of the situations in which ``phi2`` is
# not Linear) but this can be changed through the ``lrule`` argument.
rbf = p.RBFN(c)
# Now, we present some examples from the training set to the network. Notice
# that we already know where the centers are located, and in the instantiation
# of the RBFN algorithm the widths of the RBFs were computed. You can access the
# widths through the ``width`` property.
i = 0
error = 1.
while i < 5000 and error > 5.e-2:
# We choose randomly one point in the training set
j = randrange(20)
# And feed it to the network.
e = rbf.feed(x[j], y[j])
i = i + 1
# In the end of the training, you can inspect the weights of the second layer by
# using the ``weights`` property. We print them here, for reference:
print rbf.weights
# We will now plot the result. We apply the RBFN in 500 points in the domain
# from -pi to pi. Notice that the result of a neural network is a two-dimension
# array, so we select first line, first column of each response.
t = linspace(-pi, pi, 500)
yh = [ rbf(tj)[0, 0] for tj in t ]
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``radial-basis-function.png``.
try:
    import pylab
    pylab.grid(True)
    # Stems mark the 20 training samples; the solid line is the RBFN output.
    pylab.stem(x, y, "k-", "ko", "k-")
    pylab.plot(t, yh)
    pylab.xlim([ -pi, pi ])
    pylab.ylim([ amin(y)-0.1, amax(y)+0.1 ])
    pylab.xticks([ -pi, -pi/2, 0., pi/2, pi ])
    pylab.gca().set_xticklabels([ r'$-\pi$', r'$-\pi/2$', r'$0$', r'$\pi/2$', r'$\pi$' ])
    pylab.savefig("radial-basis-function.png")
except ImportError:
    # matplotlib is optional: silently skip the plot when it is absent.
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/polynomial-regression.py
# Using neural networks to approximate functions by polynomials.
################################################################################
# The learning algorithm of neural networks are based, mainly, in the mean
# squared error of the output, considering the desired output of the network.
# The same criterion is used for a lot of other types of approximation. The most
# used, and one of the first, is the linear regression, where the relation of a
# set of points is approximated by a straight line. The theory for the linear
# regression can be easily expanded to approximate functions by polynomials, but
# in general, the equations are not simple.
#
# A neuron can be used to make this approximation simpler. This tutorial shows
# how to do it.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# We create here the neural network. To make the polynomial regression, instead
# of supplying the neuron with the value of the independent variable, we supply
# its integer powers. The number of inputs will be, thus, the order of the
# polynomial used for approximation. With this approach, our neural network will
# be very simple: a single neuron with N+1 inputs, one output, and linear
# activation. The learning algorithm will be the LMS algorithm.
N = 10
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05), bias=True)
# We will map a period of a sinus. It is not expected that the coefficients
# found here will be the same of the Taylor Series, since the optimization
# criterion is different.
error = 1
i = 0
powers = arange(N, dtype=float) # This vector will be used to calculate the powers
while i < 2000:
    # Here, we generate one value in the interval and calculate the desired
    # response. We raise ``x`` to ``powers`` to generate the inputs. It is easy
    # to see that the polynomial regression is a linear combination of the
    # powers of a variable.
    x = random.uniform(-0.5, 0.5)
    d = sin(pi*x)
    xo = x ** powers
    # We feed the network, calculate the error and teach the network.
    # NOTE(review): ``error`` is tracked but never used as a stop criterion --
    # the loop always runs the fixed 2000 iterations.
    y = nn(xo)
    error = abs(d - y)
    nn.learn(xo, d)
    i = i + 1
print "Coefficients: "
for i in range(N):
print "%d -> %10.7f" % (i, nn[0].weights[0][i])
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``polynomial-regression.png``.
try:
    import pylab
    x = linspace(-0.5, 0.5, 200)
    y = sin(pi*x)
    # Evaluate the trained neuron over the whole interval for comparison
    # against the true sinus.
    ye = [ ]
    for xo in x:
        ye.append(nn(xo**powers)[0, 0])
    ye = array(ye)
    pylab.subplot(211)
    pylab.hold(True)
    pylab.grid(True)
    pylab.plot(x, y, 'b--')
    pylab.plot(x, ye, 'g')
    pylab.xlim([ -0.5, 0.5 ])
    pylab.legend([ "$y$", "$\hat{y}$" ])
    pylab.subplot(212)
    pylab.grid(True)
    # The neuron is biased, so there are N+1 weights to show.
    pylab.stem(arange(0, N+1), reshape(nn[0].weights, (N+1,)), "k-", "ko", "k-")
    pylab.xlim([0, N])
    pylab.savefig("polynomial-regression.png")
except ImportError:
    pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can also be used to interpolate values of a sequence or
# function of which little is known. Typically, the structure used is a double
# layer neural network. In the first layer, neurons with sigmoid activation to
# map the nonlinearities in the function, and in the second layer a linear
# activated neuron, to combine the inputs. This structure is commonly known as
# MADALINE (Multiple Adaptive Linear Element). The goal of this tutorial is to
# show how to use Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
from random import randrange
# This is the sequence that we will be interpolating, consisting of twenty
# samples evenly distributed over the interval from -pi/2 to pi/2. While with
# this data simpler methods could be used (since we will be interpolating a
# sinus), our goal is to show how to do that with neural networks
t = linspace(-pi/2., pi/2., 20)
x = sin(t)
# We create the neural network with the command below. It should be a network
# with one input neuron, and one output neuron. The hidden layer must have
# enough neurons to map the variations in the function. We will map part of a
# sinus, so 10 neurons should be enough. We must make the neurons biased. The
# reason for this is that the sigmoids of the first layer must be shifted to the
# position of the variation it will map. The second layer does not need to be
# biased, in general, but there is no harm in letting it be.
nn = p.FeedForward((1, 10, 1), phi=(p.Sigmoid, p.Identity),
                   lrule=p.BackPropagation(0.05), bias=True)
# We will use this list to track the error for posterior plotting
elog = [ ]
error = 1. # start above the stop threshold so the training loop is entered
# The learning loop will be executed at most 5000 times. Most of the time, this
# is an overkill, but given the stochastic nature of the learning, sometimes it
# is needed. Anyways, we put a stop trigger -- when the error reaches 1e-5, the
# algorithm stops
i = 0
while i < 5000 and error > 1.e-5:
    # The training sequence is a list of samples. We could shuffle the list and
    # present them in the same order for many epochs. However, it can be useful
    # to randomly choose a sample everytime, since the randomness can help
    # convergence.
    index = randrange(20)
    xx = t[index]
    dd = x[index]
    # Here, the network is fed, the error is collected and logged, and the
    # learning process takes place.
    y = nn(xx)[0, 0]
    error = abs(dd - y)
    nn.learn(xx, dd)
    elog.append(error)
    i = i + 1
# We will now plot the response of the network for more than twenty samples,
# using 500 samples in the same interval.
ty = linspace(-pi/2, pi/2, 500)
y = [ ]
for tt in ty:
yy = nn(tt)[0, 0]
y.append(yy)
print nn[0].weights
print error
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``interpolation.png``.
try:
    import pylab
    pylab.subplot(211)
    pylab.hold(True)
    pylab.grid(True)
    # Top pane: error history; bottom pane: samples (stems) vs. network output.
    pylab.plot(array(elog), 'b')
    pylab.xlabel("Error")
    pylab.subplot(212)
    pylab.grid(True)
    pylab.stem(t, x, "k-", "ko", "k-")
    pylab.plot(ty, y)
    pylab.xlim([ -pi/2, pi/2 ])
    pylab.ylim([ amin(y)-0.1, amax(y)+0.1 ])
    pylab.xticks([ -pi/2, -pi/4, 0., pi/4, pi/2 ])
    pylab.gca().set_xticklabels([ r'$-\pi/2$', r'$-\pi/4$', r'$0$', r'$\pi/4$', r'$\pi/2$' ])
    pylab.savefig("interpolation.png")
except ImportError:
    # Without matplotlib, just report the final state on stdout.
    print "After %d iterations:" % (len(elog),)
    print nn[0].weights
    print nn[1].weights
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/k-means.py
# Example of using K-Means implementation
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace. We will
# also need the random module:
from numpy import *
import random
import peach as p
# In this tutorial, we reproduce the behaviour we seen in the self-organizing
# maps tutorial (please, refer to that tutorial for more information). The
# K-Means algorithm has the ability to find the clusters that partition a given
# set of points. This tutorial shows graphically how this happens. We have a set
# of points in the cartesian plane, each coordinate obtained from a central
# point plus a random (gaussian, average 0, small variance) shift in some
# direction.
# First, we create the training set: 300 points scattered (gaussian noise,
# average 0, standard deviation 0.1) around five fixed centers on the plane,
# cycling through the centers one example at a time.
train_size = 300
centers = [ array(c, dtype=float) for c in [ ( 1.0, 0.0),
                                             ( 1.0, 1.0),
                                             ( 0.0, 1.0),
                                             (-1.0, 1.0),
                                             (-1.0, 0.0) ] ]
xs = [ centers[k % 5] + array([ random.gauss(0.0, 0.1),
                                random.gauss(0.0, 0.1) ], dtype=float)
       for k in range(train_size) ]
# Since we are working on the plane, each example and each cluster will have two
# components. We will have five clusters, since we have five centers. The
# K-Means instance is created below.
km = p.KMeans(xs, 5)
# Seed the five cluster centers on a small circle around the origin.
for i in range(5):
    km.c[i, 0] = 0.3 * cos(i*pi/4)
    km.c[i, 1] = 0.3 * sin(i*pi/4)
# The __call__ interface runs the algorithm till completion. It returns the
# centers of the classification. We might pass the parameter imax to the
# algorithm. This is the maximum number of passes. In general, K-Means will
# converge very fast and with little error. The default value for this
# parameter is 20. Notice, however, that the algorithm automatically stops if
# there are no more changes in the clusters.
c = km()
print "The algorithm converged to the centers:"
print c
print
# If the system has the plot package matplotlib, this tutorial tries to plot
# the training set and the clustered centers. The plot is saved in the file
# ``k-means.png``.
try:
    from matplotlib import *
    from matplotlib.pylab import *
    xs = array(xs)
    figure(1).set_size_inches(8, 4)
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.hold(True)
    # (Removed two leftover debug ``print`` statements that dumped the whole
    # training set and the centers to stdout.)
    a1.scatter(xs[:, 0], xs[:, 1], color='black', marker='x')
    a1.scatter(c[:, 0], c[:, 1], color='red', marker='o')
    savefig("k-means.png")
except ImportError:
    pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/mapping-a-plane.py
# Using a neuron to map a plane
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# We import numpy, random and peach, as those are the libraries we will be
# using.
from numpy import *
import random
import peach as p
# Here, we create a FeedForward network with only one layer, with two inputs and
# one output. Since it is only one output, there is only one neuron in the
# layer. We use LMS as the learning algorithm, and the neuron must be biased.
# Notice that we use 0.02 as the learning rate for the algorithm.
nn = p.FeedForward((2, 1), lrule=p.LMS(0.02), bias=True)
# These lists will track the values of the synaptic weights and the error. We
# will use it later to plot the convergence, if the matplotlib module is
# available
w0 = [ ]
w1 = [ ]
w2 = [ ]
elog = [ ]
# We start by setting the error to 1, so we can enter the looping:
error = 1
while abs(error) > 1e-7:             # Max error is 1e-7
    x1 = random.uniform(-10, 10)     # Generating an example
    x2 = random.uniform(-10, 10)
    x = array([ x1, x2 ], dtype = float)
    d = -1 - 3*x1 + 2*x2             # Plane equation
    error = nn.feed(x, d)
    # Tracking error and weights.
    # NOTE(review): this assumes the three entries of weights[0] are the two
    # input weights plus the bias -- confirm the ordering against peach docs.
    w0.append(nn[0].weights[0][0])
    w1.append(nn[0].weights[0][1])
    w2.append(nn[0].weights[0][2])
    elog.append(d - nn(x)[0, 0])
print "After %d iterations, we get as synaptic weights:" % (len(w0),)
print nn[0].weights
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``mapping-a-plane.png``.
try:
    from matplotlib import *
    from matplotlib.pylab import *
    # (Removed an unused ``vsize`` local that was never referenced.)
    figure(1).set_size_inches(8, 4)
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.hold(True)
    a1.grid(True)
    # One curve per synaptic weight, plus the error, on the same axes.
    a1.plot(array(w0))
    a1.plot(array(w1))
    a1.plot(array(w2))
    a1.plot(array(elog))
    a1.set_ylim([-10, 10])
    a1.legend([ "$w_0$", "$w_1$", "$w_2$", "$error$" ])
    savefig("mapping-a-plane.png")
except ImportError:
    pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/custom-activation.py
# Using custom activation functions
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# First, we import the needed modules
from numpy import *
import peach as p
# Peach can work with custom activation functions, if you need to use them.
# There are a number of ways of doing that. Please, use this file as a template
# to create your own.
# An existing activation function can be customized during its instantiation.
# For example, if you want to use a different ramp, starting in (-1, -1) and
# ending in (1, 1), you can use the simple command:
CustomActivationFunction1 = p.Ramp((-1., -1.), (1., 1.))
# You can also create your activation function as a simple function, and turn it
# into an activation function. Let's use the ramp example as above. You can
# create a simple activation function like this:
def custom_ramp(x):
    '''Ramp activation: clamp a scalar to the interval [-1, 1].'''
    return max(-1., min(1., x))
CustomActivationFunction2 = p.Activation(custom_ramp)
# But, please, notice that the derivative for a function create as above will
# be estimated. While it is not a problem for a ramp function, it might be a
# problem with diferent functions, and it can be less efficient too.
# The last way to create an activation function is by subclassing Activation.
# To do that, you will have to implement the __init__, __call__ and derivative
# methods. Use the code below (where we implement, again, a ramp) as a template:
class CustomActivationFunction3(p.Activation):
    '''
    Don't forget to document your code! This class implements, once more, the
    same [-1, 1] ramp, but as a full Activation subclass with an exact
    derivative.
    '''
    def __init__(self):
        '''
        We won't pass any parameter to the initializer of the class, since we
        don't want further customization.
        '''
        p.Activation.__init__(self)
    def __call__(self, x):
        '''
        The __call__ interface should receive a (vector of) scalar and return a
        scalar. Remember that activation functions should be able to deal with
        vectors, if needed, so using the ``numpy`` functions will really help!
        Please consult the numpy documentation to understand what ``select``
        does.
        '''
        # Piecewise: -1 below -1, the identity inside [-1, 1), and 1 elsewhere.
        return select([ x < -1., x < 1. ], [ -1., x ], 1.)
    def derivative(self, x, dx=1.e-5):
        '''
        The derivative of your function must be implemented in this method,
        because a lot of the convergence methods use it. The method should
        receive a (vector of) scalar and return a scalar. The second parameter
        will be the precision of the derivative, and it is seldom used. It is
        a good measure to put it as a named parameter, just to make it sure.
        '''
        # Slope is 1 inside the ramp region, 0 in both saturated regions.
        return select([ x < -1., x < 1. ], [ 0., 1. ], 0.)
# The functions thus generated can be used in any place where an activation
# function or an activation class would be used. | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/self-organizing-maps.py
# Extended example on self-organizing maps
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace. We will
# also need the random module:
from numpy import *
import random
import peach as p
# A self-organizing map has the ability to automatically recognize and classify
# patterns. This tutorial shows graphically how this happens. We have a set of
# points in the cartesian plane, each coordinate obtained from a central point
# plus a random (gaussian, average 0, small variance) shift in some direction.
# We use this set to build the network.
# First, we create the training set: 300 points scattered (gaussian noise,
# average 0, standard deviation 0.1) around five fixed centers on the plane,
# cycling through the centers one example at a time.
train_size = 300
centers = [ array(c, dtype=float) for c in [ ( 1.0, 0.0),
                                             ( 1.0, 1.0),
                                             ( 0.0, 1.0),
                                             (-1.0, 1.0),
                                             (-1.0, 0.0) ] ]
xs = [ centers[k % 5] + array([ random.gauss(0.0, 0.1),
                                random.gauss(0.0, 0.1) ], dtype=float)
       for k in range(train_size) ]
# Since we are working on the plane, each example and each neuron will have two
# coordinates. We will use five neurons (since we have five centers). The
# self-organizing map is created by the line below. Our goal is to show how the
# weights converge to the mass center of the point clouds, so we initialize the
# weights to show it:
nn = p.SOM((5, 2))
# Seed the five weight vectors on a small circle around the origin.
for i in range(5):
    nn.weights[i, 0] = 0.3 * cos(i*pi/4)
    nn.weights[i, 1] = 0.3 * sin(i*pi/4)
# We use these lists to track the variation of each neuron:
wlog = [ [ nn.weights[0] ],
         [ nn.weights[1] ],
         [ nn.weights[2] ],
         [ nn.weights[3] ],
         [ nn.weights[4] ] ]
# Here we feed and update the network. We could use the ``train`` method, but
# we want to track the weights:
for x in xs:
    # ``y`` is the index of the winning neuron for this example; log a copy of
    # its updated weights.
    y = nn(x)
    nn.learn(x)
    wlog[y].append(array(nn.weights[y]))
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``self-organizing-maps.png``.
try:
    from matplotlib import *
    from matplotlib.pylab import *
    figure(1).set_size_inches(8, 4)
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.hold(True)
    # Black dots: training points; crossed lines: each neuron's weight track
    # (the initial seed position is skipped).
    for x in xs:
        plot( [x[0]], [x[1]], 'ko')
    for w in wlog:
        w = array(w[1:])
        plot( w[:, 0], w[:, 1], '-x')
    savefig("self-organizing-maps.png")
except ImportError:
    print "After %d iterations:" % (train_size,)
    print nn.weights
# -*- coding: utf-8 -*-
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/hopfield-network.py
# Hopfield neural networks for recovering patterns
################################################################################
# We import here the needed libraries.
import numpy as p
from peach import *
from random import shuffle
# This function will be used to show the patterns in a way that is easier to
# the eyes. The patterns are represented by numbers -1 and 1. This function
# converts a array in a 7x5 representation, substituting a blank space for -1
# and an asterisk for 1.
def show(x):
    '''
    Pretty-print a 35-element bipolar pattern as a 7x5 grid, printing '*' for
    each 1 and a blank space for each -1.
    '''
    n = len(x)
    for i in range(0, n):
        if i%5 == 0:
            print
        if x[i] == 1:
            print '*',
        else:
            print ' ',
# This is the training set. We will be recognizing vowels in a 7x5 pattern,
# defined here. A Hopfield network doesn't have a very good storage capacity
# that allows recovering of patterns without a big probability of error.
# 5 in 35 patterns is a good enough number that allows a good demonstration.
training_set = [
array([ -1, -1, 1, -1, -1, # A
-1, 1, -1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, 1, 1, 1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1 ]),
array([ 1, 1, 1, 1, 1, # E
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, 1, -1,
1, -1, -1, -1, -1,
1, -1, -1, -1, -1,
1, 1, 1, 1, 1 ]),
array([ -1, 1, 1, 1, -1, # I
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, -1, 1, -1, -1,
-1, 1, 1, 1, -1 ]),
array([ -1, -1, 1, -1, -1, # O
-1, 1, -1, 1, -1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
-1, 1, -1, 1, -1,
-1, -1, 1, -1, -1 ]),
array([ 1, -1, -1, -1, 1, # U
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
1, -1, -1, -1, 1,
-1, 1, 1, 1, -1 ])
]
# We define here a test pattern, from one of the patterns of the training set,
# and adding noise. Noise is added by randomly choosing a position in the
# pattern and inverting it.
x = array(training_set[0])
n = len(x)
noise_position = range(n) # NOTE: relies on Python 2 range() returning a list
shuffle(noise_position)
for k in noise_position[:8]: # We invert as much as 8 points in the pattern
    x[k] = -x[k]
x = x.reshape((n, 1)) # column vector, the shape the network operates on
# Here we create the Hopfield network. The Hopfield instantiation needs only the
# size of the network. The default activation function is the Signum, but it can
# be changed by passing any activation function as the second argument of the
# class instantiation.
nn = Hopfield(n)
nn.train(training_set)
# If we call the network, we can retrieve the best pattern automatically. But we
# want to see the progress of the network, so we will step by the algorithm to
# show partial results. To perform a step of the convergence, use the ``step``
# method
i = 0
xx = array(x) # work on a copy so the noisy original can be shown at the end
while i < 100:
    xx = nn.step(xx)
    show(xx)
    #raw_input() # Uncomment this line if you want to see step-by-step.
    i = i + 1    # You will need to press return to perform the next step.
# Shows the initial and the last states. Notice: errors might happen. Since the
# amount of noise is relatively high, the network can converge to a final state
# that is not one of the stored patterns
print "\n\n"+"-"*40
print "\n\nInitial state:"
show(x)
print "\n\nFinal state:"
show(xx)
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: mutivariate-optmization.py
# Optimization of two-variable functions
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    '''Simplified two-variable Rosenbrock function used in this demo.'''
    x, y = xy
    u = 1. - x
    v = y - x*x
    return u**2. + v**2.
# Gradient of Rosenbrock function
def df(xy):
    '''Gradient of the simplified Rosenbrock function above.'''
    x, y = xy
    gx = -2.*(1.-x) - 4.*x*(y - x*x)
    gy = 2.*(y - x*x)
    return array([ gx, gy ])
# Hessian of Rosenbrock function
def hf(xy):
    '''Hessian matrix of the simplified Rosenbrock function.'''
    x, y = xy
    fxx = 2. - 4.*(y - 3.*x*x)  # d2f/dx2
    fxy = -4.*x                 # d2f/dxdy (symmetric)
    return array([ [ fxx, fxy ],
                   [ fxy, 2. ] ])
# We will allow no more than 100 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 100
# All four optimizers below are driven by exactly the same stepping loop, so we
# factor it into a small helper instead of repeating it four times.
def _track(opt, x0, y0, imax):
    '''
    Step the optimizer ``opt`` ``imax`` times from the first estimate
    ``(x0, y0)`` and return two arrays with the x and y coordinates visited,
    used below to plot the convergence path.
    '''
    xt = [ x0 ]
    yt = [ y0 ]
    for _ in range(imax):
        x, e = opt.step()   # ``e`` is the step error, unused here
        xt.append(x[0])
        yt.append(x[1])
    return array(xt), array(yt)
# We first try using the gradient optimizer. To create an optimizer, we declare
# the function to be optimized and the first estimate. Depending on the
# algorithm, other parameters are available. Please, consult the documentation
# for more information.
grad = p.Gradient(f, (0.1, 0.2), df=df)
xd, yd = _track(grad, 0.1, 0.2, iMax)
# Gradient optimizer with estimate derivative. To allow the algorithm to
# estimate the derivative, we don't declare a derivative function.
grad2 = p.Gradient(f, (0.1, 0.2))
xe, ye = _track(grad2, 0.1, 0.2, iMax)
# Newton optimizer, with explicit declaration of the gradient and hessian.
newton = p.Newton(f, (0.1, 0.2), df=df, hf=hf)
xn, yn = _track(newton, 0.1, 0.2, iMax)
# Newton optimizer, with estimated gradient and hessian. We allow the algorithm
# to estimate these functions by not declaring them.
newton2 = p.Newton(f, (0.1, 0.2))
xq, yq = _track(newton2, 0.1, 0.2, iMax)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``multivariate-optimization.png``.
# These commands are used to create the functions that will plot the contour
# lines of the Rosenbrock function.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = f((x, y))
# Levels spaced roughly exponentially so the contours hug the valley of f.
levels = exp(linspace(0., 2., 10)) - 0.9
try:
    from matplotlib import *
    from matplotlib.pylab import *
    figure(1).set_size_inches(8, 8)
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.hold(True)
    a1.grid(True)
    # One convergence path per optimizer, over the contour plot of f.
    a1.plot(xd, yd)
    a1.plot(xe, ye)
    a1.plot(xn, yn)
    a1.plot(xq, yq)
    a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
    a1.legend([ 'Gradient', 'Gradient/Estimated', 'Newton',
                'Newton/Estimated' ])
    a1.set_xlim([ 0., 2. ])
    a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
    a1.set_ylim([ 0., 2. ])
    a1.set_yticks([ 0.5, 1., 1.5, 2. ])
    savefig("multivariate-optimization.png")
except ImportError:
    # Without matplotlib, just report the final estimates on stdout.
    print "Gradient Optimizer: ", (xd[-1], yd[-1])
    print "Gradient Optimizer with estimated gradient: ", (xe[-1], ye[-1])
    print "Newton Optimizer: ", (xn[-1], yn[-1])
    print "Newton Optimizer with estimated hessian: ", (xq[-1], yq[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: derivative-optmization.py
# Simple optimization of one-variable functions by derivative methods
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# The Rosenbrock function will be used to test the optimizers. This is a
# simplified version, which allows faster convergence, and it serves only the
# purposes of testing.
def f(x):
    '''One-variable simplified Rosenbrock function, used to test optimizers.'''
    u = 1. - x
    v = 1. - x*x
    return u**2. + v**2.
# The derivative of the Rosenbrock function. This is used in the Gradient and
# Newton search methods.
def df(x):
    '''First derivative of f, used by the Gradient and Newton methods.'''
    t = 1. - x
    s = 1. - x*x
    return -2.*t - 4.*s*x
# The second derivative of the Rosenbrock function. Used in Newton method.
def ddf(x):
    '''Second derivative of f, needed by the Newton method.'''
    return 2. - (1. - 3.*x*x)*4.
# We will allow no more than 100 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 100
# The four optimizers below share exactly the same stepping loop, so we factor
# it into a small helper instead of repeating it four times.
def _track(opt, x0, imax):
    '''
    Step the optimizer ``opt`` ``imax`` times from the first estimate ``x0``
    and return the array of successive estimates, for plotting convergence.
    '''
    xt = [ x0 ]
    for _ in range(imax):
        x, e = opt.step()   # ``e`` is the step error, unused here
        xt.append(x)
    return array(xt)
# Gradient optimizer. To create an optimizer, we declare the function to be
# optimized and the first estimate. Depending on the algorithm, other parameters
# are available. Please, consult the documentation for more information.
grad = p.Gradient(f, 0.84, df=df)
xd = _track(grad, 0.84, iMax)
# Gradient optimizer with estimated gradient
grad2 = p.Gradient(f, 0.84)
xe = _track(grad2, 0.84, iMax)
# Newton optimizer with explicit declaration of derivatives
newton = p.Newton(f, 0.84, df=df, hf=ddf)
xn = _track(newton, 0.84, iMax)
# Newton optimizer with estimated gradient and hessian
newton2 = p.Newton(f, 0.84)
xq = _track(newton2, 0.84, iMax)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``derivative-optimization.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
vsize = 4
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xd)
a1.plot(xe)
a1.plot(xn)
a1.plot(xq)
a1.legend([ "Gradient", "Gradient/Estimated", "Newton",
"Newton/Estimated" ])
savefig("derivative-optimization.png")
except ImportError:
print "Gradient Optimizer: ", xd[-1]
print "Gradient Optimizer with estimated derivatives: ", xe[-1]
print "Newton Optimizer: ", xn[-1]
print "Newton Optimizer with estimated derivatives: ", xq[-1] | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: binary-simulated-annealing.py
# Optimization of functions by binary simulated annealing
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    """Simplified two-dimensional Rosenbrock function; minimum 0 at (1, 1)."""
    u, v = xy
    return (1. - u) ** 2. + (v - u * u) ** 2.
# We will allow no more than 1000 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 1000
# Here we create the optimizer. This optimizer is created in quite a different
# way from the others we used. This is because the discrete optimizer deals with
# bit streams, and changing values on bits can be highly unpredictable --
# especially if you are dealing with floating point representations. The first
# two parameters, however, are the same as in other optimizers: the function to
# be optimized, and the first estimate. The next parameter is a list of ranges
# of values that will be allowed for our estimates, one tuple per variable.
# The last parameter is a string of formats that will be used to decode the
# bit stream: they work exactly as in the struct module that is included in
# every Python distribution -- please, consult the official documentation for
# more information. In this example, we use two floating points, thus 'ff'.
# The ranges might be needed if you use floating points: the algorithm
# performs a sanity check to guarantee that the bitarray really represents a
# floating point in the allowed range -- in case it is not, they are used to
# randomly choose new estimates.
# Create the binary simulated annealing optimizer: objective, initial
# estimate, allowed range per variable, and the struct-style format string
# used to decode the bit stream (two floats, thus 'ff').
bsa = p.BinarySA(f, (0.1, 0.2), [ (0., 2.), (0., 2.) ], 'ff')
xd = [ ]                      # trajectory of the estimates, x coordinate
yd = [ ]                      # trajectory of the estimates, y coordinate
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, e = bsa.step()         # e is the energy/error at the new estimate (unused)
    xd.append(x[0])
    yd.append(x[1])
xd = array(xd)
yd = array(yd)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``binary-simulated-annealing.png``.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xd, yd)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("binary-simulated-annealing.png")
except ImportError:
print "Results: ", (xd[-1], yd[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: particle-swarm-optimization.py
# Optimization of functions by particle swarms
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
from numpy.random import random
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    """Simplified Rosenbrock surface; its single minimum, 0, sits at (1, 1)."""
    return (1. - xy[0]) ** 2. + (xy[1] - xy[0] * xy[0]) ** 2.
# We will allow no more than 500 iterations. For the simplified Rosenbrock
# function, no more than that will be needed. In fact, the particle swarm
# optimizer is very good in this, probably a lot less than 500 iterations will
# be needed. But we want to be sure.
iMax = 500
# We need to create a population of estimates. In algorithms based on
# populations, such as this or Genetic Algorithms, a list of estimates should
# be created. To this end, we will specify the ranges of the variables in the
# interval from 0. to 2., and randomly choose from this.
# Ranges are specified as a list of tuples, where each element is the allowed
# range for the corresponding variable. In each tuple, the first value is the
# lower limit of the interval, and the second value is its upper limit.
# Allowed range for each variable: (lower limit, upper limit) per tuple.
ranges = [ ( 0., 2. ), ( 0., 2. ) ]
# Convert the ranges to a numpy array and build a swarm of five particles
# randomly positioned on a circle of radius 1 around the point (1., 1.), so
# the behaviour of the algorithm is easy to follow in the plot. In general,
# swarms should have more than five particles, but this is enough here.
ranges = array(ranges)
theta = random((5, 1)) * 2. * pi
x0 = c_[ 1. + cos(theta), 1. + sin(theta) ]
# Create the optimizer. Since this is a population-based algorithm, we track
# every particle as well as the global best at each step.
pso = p.ParticleSwarmOptimizer(f, x0, ranges)
xd = [ ]                      # global best, x coordinate
yd = [ ]                      # global best, y coordinate
xx = [ ]                      # position of every particle at each step
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, e = pso.step()         # e is the error at the new global best (unused)
    xd.append(x[0])
    yd.append(x[1])
    xx.append(pso[:])
xd = array(xd)
yd = array(yd)
xx = array(xx)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``particle-swarm-optimization.png``.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xx[:, 0, 0], xx[:, 0, 1], 'gray')
a1.plot(xx[:, 1, 0], xx[:, 1, 1], 'gray')
a1.plot(xx[:, 2, 0], xx[:, 2, 1], 'gray')
a1.plot(xx[:, 3, 0], xx[:, 3, 1], 'gray')
a1.plot(xx[:, 4, 0], xx[:, 4, 1], 'gray')
a1.plot(xd, yd)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("particle-swarm-optimization.png")
except ImportError:
print "Results: ", (xd[-1], yd[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: linear-optmization.py
# Simple optimization of one-variable functions
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# The Rosenbrock function will be used to test the optimizers. This is a
# simplified version, which allows faster convergence, and it serves only the
# purposes of testing.
def f(x):
    """Simplified one-dimensional Rosenbrock test function; zero at x = 1."""
    a = 1 - x
    b = 1 - x * x
    return a ** 2 + b ** 2
# We will allow no more than 100 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 100
# Direct one-dimensional optimizer. To create an optimizer, we declare the
# function to be optimized and the first estimate. Depending on the algorithm,
# other parameters are available. Please, consult the documentation for more
# information.
linear = p.Direct1D(f, 0.75)
xl = [ ]                      # history of the estimates
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, e = linear.step()      # e is the current error estimate (unused here)
    xl.append(x)
xl = array(xl)
# Parabolic interpolation optimizer. Each step returns the current bracket of
# three points; we log the vertex of the parabola interpolating them.
interp = p.Interpolation(f, (0., 0.75, 1.5))
xp = [ ]
for _ in range(iMax):
    x, e = interp.step()
    x0, x1, x2 = x
    f0, f1, f2 = f(x0), f(x1), f(x2)    # evaluate f only once per point
    q0 = x0 * (f1 - f2)
    q1 = x1 * (f2 - f0)
    q2 = x2 * (f0 - f1)
    q = q0 + q1 + q2
    if q == 0:
        # q == 0 means all three estimates coincide; using x0 directly
        # avoids a division by zero below.
        xm = x0
    else:
        xm = 0.5 * (x0*q0 + x1*q1 + x2*q2) / (q0 + q1 + q2)
    xp.append(xm)
xp = array(xp)
# Golden section optimizer: each step returns the current interval; we log
# its midpoint.
golden = p.GoldenRule(f, (0.25, 1.25))
xg = [ ]
for _ in range(iMax):
    x, e = golden.step()
    xo, xh = x
    xg.append(0.5 * (xo + xh))
xg = array(xg)
# Fibonacci optimizer: same logging strategy as the golden section.
fib = p.Fibonacci(f, (0.75, 1.4))
xf = [ ]
for _ in range(iMax):
    x, e = fib.step()
    xo, xh = x
    xf.append(0.5 * (xo + xh))
xf = array(xf)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``linear-optimization.png``.
try:
from matplotlib import *
from matplotlib.pylab import *
vsize = 4
figure(1).set_size_inches(8, 4)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xl)
a1.plot(xp)
a1.plot(xg)
a1.plot(xf)
a1.legend([ "Linear", "Interpolation", "Golden Section", "Fibonacci" ])
savefig("linear-optimization.png")
except ImportError:
print "Linear Optimizer: ", xl[-1]
print "Interpolation Optimizer: ", xp[-1]
print "Golden Rule Optimizer: ", xg[-1]
print "Fibonacci Optimizer: ", xf[-1]
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: genetic-algorithms.py
# Optimization of functions by genetic algorithms
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
from numpy.random import random
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    """Simplified two-dimensional Rosenbrock function; minimum 0 at (1, 1)."""
    u, v = xy
    return (1. - u) ** 2. + (v - u * u) ** 2.
# Genetic algorithms maximize their fitness function; minimizing f is the
# same thing as maximizing its negative.
def J(x):
    """Fitness function for the genetic algorithm: the negated surface f."""
    return -f(x)
# We will allow no more than 500 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 500
# We need to create a population of estimates. In algorithms based on
# populations, such as this or Genetic Algorithms, a list of estimates should
# be created. To this end, we will specify the ranges of the variables in the
# interval from 0. to 2., and randomly choose from this.
# Ranges are specified as a list of tuples, where each element is the allowed
# range for the corresponding variable. In each tuple, the first value is the
# lower limit of the interval, and the second value is its upper limit.
# Allowed range for each variable: (lower limit, upper limit) per tuple.
ranges = [ ( 0., 2. ), ( 0., 2. ) ]
# Convert the ranges to a numpy array and build a population of 25
# chromosomes randomly positioned on a circle of radius 1 around the point
# (1., 1.), so the behaviour of the algorithm is easy to follow in the plot.
ranges = array(ranges)
theta = random((25, 1)) * 2. * pi
x0 = c_[ 1. + cos(theta), 1. + sin(theta) ]
# Create the optimizer. Since this is a population-based algorithm, we track
# every chromosome as well as the best estimate at each generation. The 'ff'
# format string tells the algorithm how to decode each chromosome (two
# floats), as in the struct module.
ga = p.GeneticAlgorithm(J, x0, ranges, 'ff')
xd = [ ]                      # best estimate, x coordinate
yd = [ ]                      # best estimate, y coordinate
xx = [ ]                      # decoded position of every chromosome
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, _fit = ga.step()       # second item is the best fitness (unused here)
    x = x.decode()
    xd.append(x[0])
    yd.append(x[1])
    xx.append([ c.decode() for c in ga ])
xd = array(xd)
yd = array(yd)
xx = array(xx)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``genetic-algorithms.png``.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xx[:, 0, 0], xx[:, 0, 1], 'gray')
a1.plot(xx[:, 1, 0], xx[:, 1, 1], 'gray')
a1.plot(xx[:, 2, 0], xx[:, 2, 1], 'gray')
a1.plot(xx[:, 3, 0], xx[:, 3, 1], 'gray')
a1.plot(xx[:, 4, 0], xx[:, 4, 1], 'gray')
a1.plot(xd, yd)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("genetic-algorithms.png")
except ImportError:
pass
print "Results: ", (xd[-1], yd[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: quasi-newton-optimization.py
# Optimization of two-variable functions by quasi-newton methods
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    """Simplified two-dimensional Rosenbrock function; minimum 0 at (1, 1)."""
    u, v = xy
    return (1. - u) ** 2. + (v - u * u) ** 2.
# Analytical gradient of the function above.
def df(xy):
    """Gradient vector [df/dx, df/dy] of the simplified Rosenbrock function."""
    u, v = xy
    resid = v - u * u
    return array( [ -2. * (1. - u) - 4. * u * resid, 2. * resid ] )
# We will allow no more than 200 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 200
# The first estimate of the minimum is given by the DFP method. Notice that the
# creation of the optimizer is virtually the same as every other. We could, in
# this and in the other optimizers, omit the derivative function and let Peach
# estimate it for us.
dfp = p.DFP(f, (0.1, 0.2), df=df)
xd = [ 0.1 ]                  # trajectory of the estimates, seeded with the
yd = [ 0.2 ]                  # starting point (0.1, 0.2)
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, e = dfp.step()         # e is the current error estimate (unused here)
    xd.append(x[0])
    yd.append(x[1])
xd = array(xd)
yd = array(yd)
# We now try the BFGS optimizer.
bfgs = p.BFGS(f, (0.1, 0.2), df=df)
xb = [ 0.1 ]
yb = [ 0.2 ]
for _ in range(iMax):
    x, e = bfgs.step()
    xb.append(x[0])
    yb.append(x[1])
xb = array(xb)
yb = array(yb)
# Last but not least, the SR1 optimizer.
sr1 = p.SR1(f, (0.1, 0.2), df=df)
xs = [ 0.1 ]
ys = [ 0.2 ]
for _ in range(iMax):
    x, e = sr1.step()
    xs.append(x[0])
    ys.append(x[1])
xs = array(xs)
ys = array(ys)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``quasi-newton-optimization.png``.
# These commands are used to create the functions that will plot the contour
# lines of the Rosenbrock function.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xd, yd)
a1.plot(xb, yb)
a1.plot(xs, ys)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.legend([ 'DFP', 'BFGS', 'SR1' ])
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("quasi-newton-optimization.png")
except ImportError:
print "DFP Optimizer: ", (xd[-1], yd[-1])
print "BFGS Optimizer: ", (xb[-1], yb[-1])
print "SR1 Optimizer: ", (xs[-1], ys[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: continuous-simulated-annealing.py
# Optimization of functions by simulated annealing
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# This is a simplified version of the Rosenbrock function, to demonstrate
# how the optimizers work.
def f(xy):
    """Simplified Rosenbrock surface used to exercise the annealing optimizer."""
    u, v = xy
    return (1. - u) ** 2. + (v - u * u) ** 2.
# Gradient of the Rosenbrock function. The annealing algorithm uses it to
# enhance the estimate even when a random change doesn't improve it.
def df(xy):
    """Analytical gradient [df/dx, df/dy] of f."""
    u, v = xy
    r = v - u * u
    return array( [ -2. * (1. - u) - 4. * u * r, 2. * r ] )
# We will allow no more than 500 iterations. For the simplified Rosenbrock
# function, no more than that will be needed.
iMax = 500
# Here we create the optimizer. There is not much difference in how a
# stochastic optimizer is created, comparing to deterministic ones.
# NOTE(review): the embedded optimizer is built as p.Gradient(f, df, h=0.05),
# passing the derivative as the second positional argument; elsewhere in
# these tutorials the second argument of Gradient is the initial estimate --
# confirm against the peach.Gradient signature.
csa = p.ContinuousSA(f, (0.1, 0.2), [ (0., 2.), (0., 2.) ], optm=p.Gradient(f, df, h=0.05))
xd = [ ]                      # trajectory of the estimates, x coordinate
yd = [ ]                      # trajectory of the estimates, y coordinate
for _ in range(iMax):         # idiomatic counted loop replaces manual while-counter
    x, e = csa.step()         # e is the energy/error at the new estimate (unused)
    xd.append(x[0])
    yd.append(x[1])
xd = array(xd)
yd = array(yd)
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``continuous-simulated-annealing.png``.
x = linspace(0., 2., 250)
y = linspace(0., 2., 250)
x, y = meshgrid(x, y)
z = (1-x)**2 + (y-x*x)**2
levels = exp(linspace(0., 2., 10)) - 0.9
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(6, 6)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.grid(True)
a1.plot(xd, yd)
a1.contour(x, y, z, levels, colors='k', linewidths=0.75)
a1.set_xlim([ 0., 2. ])
a1.set_xticks([ 0., 0.5, 1., 1.5, 2. ])
a1.set_ylim([ 0., 2. ])
a1.set_yticks([ 0.5, 1., 1.5, 2. ])
savefig("continuous-simulated-annealing.png")
except ImportError:
print "Results: ", (xd[-1], yd[-1])
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/simple-controller.py
# A simgle-input-single-output Mamdani controller
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
import pylab as p
# This tutorial shows how to work with a fuzzy-based controller. It is really
# easy to build a standard controller using Peach. We won't go into details of
# how a controller should work -- please, consult the literature on the subject,
# as it is very rich and explains the topic a lot better than we could do here.
#
# We will show how to build a simple single-input single-output controller for
# no specific plant -- it will be completelly abstract. The goal is to show how
# to work with the capabilities built in Peach for dealing with it. A Mamdani
# controller has, typically, three steps: fuzzification, in which numerical
# values are converted to the fuzzy domain; decision rules, where the
# relationship between controlled variable and manipulated variable are
# stablished; and defuzzification, where we travel back from fuzzified domain to
# crisp numerical values.
#
# To build a controller, thus, we need to specify the membership functions of
# the controlled variable. There are a number of ways of doing that (please, see
# the tutorial on membership functions for more detail): we could use built-in
# membership functions; define our own membership functions; or use a support
# function, such as the one below.
#
# Suppose we wanted to use three membership functions to fuzzify our input
# variable: a decreasing ramp from -1 to 0, a triangle ramp from -1 to 0 to 1,
# and an increasing ramp from 0 to 1. We could define these functions as:
#
# i_neg = DecreasingRamp(-1, 0)
# i_zero = Triangle(-1, 0, 1)
# i_pos = IncreasingRamp(0, 1)
#
# Nothing wrong with this method. But, since sequences of triangles are so usual
# in fuzzy controllers, Peach has two methods to create them in a batch. The
# first one is the ``Saw`` function: given an interval and a number of
# functions, it splits the interval in equally spaced triangles. The second one
# is the ``FlatSaw`` function: it also creates a sequence of equally spaced
# triangles, but use a decreasing ramp as the first function, and an increasing
# function as the last one. Both of them return a tuple containing the functions
# in order. The same functions above could be created with the command:
i_neg, i_zero, i_pos = FlatSaw((-2, 2), 3)   # input MFs: negative, zero, positive
# assuming, that is, that the input variable will range from -2 to 2. Notice
# that if we don't use the correct interval, the starts and ends of the
# functions won't fall where we want them. Notice, also, that we are here using
# membership functions, not fuzzy sets!
# We will also need to create membership functions for the output variable.
# Let's assume we need three functions as above, in the range from -10 to 10. We
# do:
o_neg, o_zero, o_pos = FlatSaw((-10, 10), 3)   # output MFs over [-10, 10]
# The control will be done following the decision rules:
#
# IF input is negative THEN output is positive
# IF input is zero THEN output is zero
# IF input is positive THEN output is negative
#
# We will create now the controller that will implement these rules. Here is
# what we do:
Points = 100                            # resolution of the transfer-function sweep
yrange = numpy.linspace(-10., 10., 500) # domain of the output variable
c = Controller(yrange)
# Here, ``yrange`` is the interval in which the output variable is defined. Our
# controller doesn't have any rules yet, so we must add them. To add rules to a
# controller, we use the ``add_rule`` method. A rule is a tuple with the
# following format:
#
#   ((input_mf, ), output_mf)
#
# where ``input_mf`` is the condition, and ``output_mf`` is the consequence.
# This format can be used to control multiple variables. For instance, if you
# wanted to control three variables, a rule would have the form:
#
#   ((input1_mf, input2_mf, input3_mf), output_mf)
#
# Notice that the conditions are wrapped in a tuple themselves. We will add the
# rules of our controller now:
c.add_rule(((i_neg,), o_pos))
c.add_rule(((i_zero,), o_zero))
c.add_rule(((i_pos,), o_neg))
# The controller is ready to run. We use the ``__call__`` interface to pass the
# values of the input variable and collect the result. Building the transfer
# function with a comprehension replaces the manual append loop:
x = numpy.linspace(-2., 2., Points)
y = numpy.array([ c(x0) for x0 in x ])
# We will use the matplotlib module to plot these functions. We save one plot
# in a figure called 'simple-controller-mf.png', containing the membership
# functions, and another called 'simple-controller.png', containing the
# transfer function.
try:
    # Import only the names we need; wildcard imports from pylab would
    # shadow numpy names already in this namespace.
    from matplotlib.pylab import figure, axes, savefig, clf
    figure(1).set_size_inches(8., 4.)
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.hold(True)
    a1.plot(x, i_neg(x))
    a1.plot(x, i_zero(x))
    a1.plot(x, i_pos(x))
    a1.set_xlim([ -2., 2. ])
    a1.set_ylim([ -0.1, 1.1 ])
    a1.legend([ 'Negative', 'Zero', 'Positive' ])
    savefig("simple-controller-mf.png")
    clf()
    a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
    a1.plot(x, y, 'k-')
    a1.set_xlim([ -2., 2. ])
    a1.set_ylim([ -10., 10. ])
    savefig("simple-controller.png")
except ImportError:
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/norms-conorms.py
# How to use t-norms and s-norms (norms and conorms)
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
from peach.fuzzy.norms import *
# The standard operations with sets -- and thus fuzzy sets -- are intersection,
# union and complement. Fuzzy sets, however, are an extension to classical sets,
# and there are infinite ways to extend those operations. Thus the existence of
# norms, conorms and negations. We show here how to use them in Peach.
# First, remember that we must create the sets. A FuzzySet instance is returned
# when you apply a membership function over a domain. It is, in fact, a
# standard array, but making it a new class allow us to redefine operations.
# Here we create the sets:
x = numpy.linspace(-5.0, 5.0, 500)   # common domain for both fuzzy sets
a = Triangle(-3.0, -1.0, 1.0)(x)     # fuzzy set A: triangle centered at -1
b = Triangle(-1.0, 1.0, 3.0)(x)      # fuzzy set B: triangle centered at +1
# To set norms, conorms and negations, we use, respectively, the methods
# set_norm, set_conorm and set_negation. Notice that those are class methods, so
# if you change the norm for one instance of a set, you change for them all! So,
# it is better to use the class name to select the methods. Here, we will use
# Zadeh norms, that are already defined in Peach. Notice that we use the
# standard operators for and, or and not operations (respectively, &, | and ~):
FuzzySet.set_norm(ZadehAnd)
FuzzySet.set_conorm(ZadehOr)
aandb_zadeh = a & b # A and B under the Zadeh (min) norm
aorb_zadeh = a | b # A or B under the Zadeh (max) conorm
# Probabilistic norms are based on the corresponding operations in probability.
# Here we use them:
FuzzySet.set_norm(ProbabilisticAnd)
FuzzySet.set_conorm(ProbabilisticOr)
aandb_prob = a & b   # A and B under the product norm
aorb_prob = a | b    # A or B under the probabilistic-sum conorm
# There are other norms that we could use. Please, check the documentation for
# a complete list. Here are some of them:
# Norms: ZadehAnd, ProbabilisticAnd, DrasticProduct, EinsteinProduct
# Conorms: ZadehOr, ProbabilisticOr, DrasticSum, EinsteinSum
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'norms-conorms.png'.
try:
    # Import only the names we need; wildcard imports from pylab would
    # shadow numpy names already in this namespace.
    from matplotlib.pylab import figure, axes, savefig
    figure(1).set_size_inches(8, 6)
    a1 = axes([ 0.125, 0.555, 0.775, 0.40 ])   # top panel: AND (t-norms)
    a2 = axes([ 0.125, 0.125, 0.775, 0.40 ])   # bottom panel: OR (s-norms)
    a1.hold(True)
    a1.plot(x, a, 'k:')
    a1.plot(x, b, 'k:')
    a1.plot(x, aandb_zadeh, 'k')
    a1.plot(x, aandb_prob, 'k-.')
    a1.set_xlim([ -5, 5 ])
    a1.set_ylim([ -0.1, 1.1 ])
    a1.set_xticks([])
    a1.set_yticks([ 0.0, 1.0 ])
    a1.legend((r'$A$', r'$B$', 'Zadeh AND', 'Prob. AND'))
    a2.hold(True)
    a2.plot(x, a, 'k:')
    a2.plot(x, b, 'k:')
    a2.plot(x, aorb_zadeh, 'k')
    a2.plot(x, aorb_prob, 'k-.')
    a2.set_xlim([ -5, 5 ])
    a2.set_ylim([ -0.1, 1.1 ])
    a2.set_xticks([])
    a2.set_yticks([ 0.0, 1.0 ])
    a2.legend((r'$A$', r'$B$', 'Zadeh OR', 'Prob. OR'))
    savefig("norms-conorms.png")
except ImportError:
    pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/fuzzy-defuzzy.py
# Fuzzification and defuzzification are not complementary operations
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
# To demonstrate that fuzzifying and subsequently defuzzifying a crisp set does
# not correspond to identity function, we will iterate over an interval to see
# what happens.
# The FlatSaw function is a very handy function that creates a set of membership
# functions equally spaced over an interval. The border functions are ramps, and
# the middle functions are triangles. This distribution of functions is very
# common in fuzzy control.
# Domain of the input variable and the five equally spaced membership
# functions over it (ramps at the borders, triangles in the middle).
x = numpy.linspace(-2., 2., 500)
xgn, xpn, xz, xpp, xgp = FlatSaw((-2., 2.), 5)
def _response(x0):
    # Fuzzify x0 on the five membership functions, cut each set at the
    # corresponding membership degree, and aggregate everything with fuzzy OR.
    return xgn(x0) & xgn(x) | \
           xpn(x0) & xpn(x) | \
           xz(x0) & xz(x) | \
           xpp(x0) & xpp(x) | \
           xgp(x0) & xgp(x)
# We iterate over the domain, first fuzzifying each value of the variable x on
# those membership functions, then defuzzifying it back with the Centroid
# function. The comprehension replaces the manual index loop.
y = numpy.array([ Centroid(_response(xi), x) for xi in x ])
# Just to show what happens we will plot the procedure for a given value of
# the variable x.
x0 = -0.67
y0 = _response(x0)
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'fuzzy-defuzzy.png'.
try:
    import pylab
    pylab.figure(1).set_size_inches(8., 6.)
    # Top plot: the five membership functions, the aggregated (clipped)
    # response for the sample input x0, and a dashed marker at x0.
    pylab.subplot(211)
    pylab.hold(True)
    pylab.plot(x, xgn(x), 'k-')
    pylab.plot(x, xpn(x), 'k-')
    pylab.plot(x, xz(x), 'k-')
    pylab.plot(x, xpp(x), 'k-')
    pylab.plot(x, xgp(x), 'k-')
    pylab.plot(x, y0, 'k-')
    pylab.fill(x, y0, 'gray')
    pylab.plot([ x0, x0 ], [ -0.1, 1.1 ], 'k--')
    pylab.xticks([ x0 ])
    pylab.figure(1).axes[0].set_xticklabels([ r'$x_0$' ])
    pylab.ylim([ -0.1, 1.1 ])
    pylab.yticks([ 0., 0.25, 0.5, 0.75, 1. ])
    # Bottom plot: the fuzzify-then-defuzzify transfer function against the
    # identity (dashed) -- the two differ, which is the point of this tutorial.
    pylab.subplot(212)
    pylab.hold(True)
    pylab.plot(x, x, 'k--')
    pylab.plot(x, y, 'k')
    pylab.plot([ x0, x0 ], [ -2., 2. ], 'k--')
    pylab.xticks([ x0 ])
    pylab.figure(1).axes[1].set_xticklabels([ r'$x_0$' ])
    pylab.yticks([ ])
    pylab.savefig('fuzzy-defuzzy.png')
except ImportError:
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/membership-functions.py
# How to use pre-defined membership functions
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
# Membership functions for representing fuzzy sets are available as classes in
# Peach. This way, by instantiating a membership function, you can configure and
# change default parameters to suit it to your needs.
# First, we create the domain in which we will represent the functions.
x = numpy.linspace(-5.0, 5.0, 500)
# Next, we create the functions by instantiating the corresponding classes.
# For more information on parameters of each function, please see the reference
# for the module. The functions thus created can be applied to numbers or arrays
# directly.
# An increasing ramp, starting in x=2 and ending in x=4
increasing_ramp = IncreasingRamp(2.0, 4.0)
# A decreasing ramp, starting in x=-4 and ending in x=-2
decreasing_ramp = DecreasingRamp(-4.0, -2.0)
# A triangle function, starting in x=-3, ending in x=0, with maximum in x=-1.5
triangle = Triangle(-3.0, -1.5, 0.0)
# A trapezoid, starting in x=-1, ending in x=3, with maximum from x=0 to x=2
trapezoid = Trapezoid(-1.0, 0.0, 2.0, 3.0)
# A gaussian with center x=-1.5 and default variance 1.
gaussian = Gaussian(-1.5)
# An increasing sigmoid, with middle point in x=3 and inclination 2.5
increasing_sigmoid = IncreasingSigmoid(3.0, 2.5)
# A decreasing sigmoid, with middle point in x=-3 and inclination 2.5
decreasing_sigmoid = DecreasingSigmoid(-3.0, 2.5)
# A generalized bell centered at x=1, with width=1.5 and exponent=4
bell = Bell(1.0, 1.5, 4.0)
# We will use the matplotlib module to plot these functions. Notice how the
# objects we instantiated before are used as functions over an array.
try:
    # Import only the names we need; wildcard imports from pylab would
    # shadow numpy names already in this namespace.
    from matplotlib.pylab import figure, axes, savefig
    figure(1).set_size_inches(8, 6)
    a1 = axes([ 0.125, 0.555, 0.775, 0.40 ])   # top: ramps, triangle, trapezoid
    a2 = axes([ 0.125, 0.125, 0.775, 0.40 ])   # bottom: sigmoids, gaussian, bell
    a1.hold(True)
    a1.plot(x, decreasing_ramp(x))
    a1.plot(x, triangle(x))
    a1.plot(x, trapezoid(x))
    a1.plot(x, increasing_ramp(x))
    a1.set_xlim([ -5, 5 ])
    a1.set_ylim([ -0.1, 1.1 ])
    a1.set_xticks([])
    a1.set_yticks([ 0.0, 1.0 ])
    a2.hold(True)
    a2.plot(x, decreasing_sigmoid(x))
    a2.plot(x, gaussian(x))
    a2.plot(x, bell(x))
    a2.plot(x, increasing_sigmoid(x))
    a2.set_xlim([ -5, 5 ])
    a2.set_ylim([ -0.1, 1.1 ])
    a2.set_xticks([])
    a2.set_yticks([ 0.0, 1.0 ])
    savefig("membership-functions.png")
except ImportError:
    pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/fuzzy-c-means.py
# Basic example of Fuzzy C-Means
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import peach as p
# We create the example list (the training set) and the corresponding membership
# values for each example. There are 18 two-dimensional examples, and 18 pairs
# of membership values. This means that each example will be classified in two
# classes, with the corresponding membership values. This means that the vector
# [ 0, 0 ] will be classified with membership value 0.7 in the first class and
# membership value 0.3 in the second class and so on. Notice that, with these
# values, we expect the two centers, after the clustering, to be located at
# [ 1., 1. ] and [ 6., 6. ].
# Training set: nine points clustered around [1, 1] and nine around [6, 6].
x = [ [ 0., 0. ], [ 0., 1. ], [ 0., 2. ], [ 1., 0. ], [ 1., 1. ], [ 1., 2. ],
      [ 2., 0. ], [ 2., 1. ], [ 2., 2. ], [ 5., 5. ], [ 5., 6. ], [ 5., 7. ],
      [ 6., 5. ], [ 6., 6. ], [ 6., 7. ], [ 7., 5. ], [ 7., 6. ], [ 7., 7. ] ]
# Initial membership values, one pair per example.
mu = [ [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.7, 0.3 ],
       [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.7, 0.3 ], [ 0.3, 0.7 ],
       [ 0.3, 0.7 ], [ 0.3, 0.7 ], [ 0.3, 0.7 ], [ 0.3, 0.7 ], [ 0.3, 0.7 ],
       [ 0.3, 0.7 ], [ 0.3, 0.7 ], [ 0.3, 0.7 ] ]
# Notice that the starting values for the memberships could be randomly chosen,
# at least for simple cases like this. You could try the lines below to
# initialize the membership array:
#
# from numpy.random import random
# mu = random((18, 1))
# mu = hstack((mu, 1.-mu))
# This parameter measures the smoothness of convergence
m = 2.0
# We create the algorithm. We must pass, in this order, the example set, the
# corresponding membership values, and the parameter `m`. This parameter is
# optional, though, and if not given, will default to 2.
fcm = p.FuzzyCMeans(x, mu, m)
# The __call__ interface runs the algorithm till completion. It returns the
# center of the classification. If we want to check the membership values for
# the vectors, the .mu instance variable can be checked. Notice that we pass the
# parameter emax = 0 to the algorithm. This is the maximum error accepted. In
# general, fuzzy c-means will converge very fast and with little error. An
# imax parameter -- the maximum number of iterations, can also be given. If it
# isn't given, 20 iterations will be assumed.
print "After 20 iterations, the algorithm converged to the centers:"
print fcm(emax=0)
print
print "The membership values for the examples are given below:"
print fcm.mu
print | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/defuzzification.py
# Defuzzification methods
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
# The main application of fuzzy logic is in the form of fuzzy controllers. The
# last step of the control is the defuzzification, or returning from fuzzy sets
# to crisp numbers. Peach has a number of ways of dealing with that operation.
# Here we see how to do that.
# Just to illustrate the method, we will create arbitrary fuzzy sets. In a
# controller, these functions would be obtained by fuzzification and a set of
# production rules. But our intent here is to show how to use the
# defuzzification methods. Remember that instantiating Membership functions
# gives us a function, so we must apply it over our domain.
# Output-variable domain: 500 samples over [-30, 30].
y = numpy.linspace(-30.0, 30.0, 500)
# Five triangular membership functions evaluated over the domain. Note the
# trailing ``(y)`` -- each name below holds an array of membership degrees,
# not a callable.
gn = Triangle(-30.0, -20.0, -10.0)(y)
pn = Triangle(-20.0, -10.0, 0.0)(y)
z = Triangle(-10.0, 0.0, 10.0)(y)
pp = Triangle(0.0, 10.0, 20.0)(y)
gp = Triangle(10.0, 20.0, 30.0)(y)
# Here we simulate the response of the production rules of a controller. In it,
# a controller will associate a membership value with every membership function
# of the output variable. Here we do that. You will notice that no membership
# values are associated with pp and gp functions. That is because we are
# supposing that they are 0, effectively eliminating those functions (we plot
# them anyway).
mf = gn & 0.33 | pn & 0.67 | z & 0.25
# Here are the defuzzification methods. Defuzzification methods are functions.
# They receive, as their first parameter, the membership function (or the fuzzy
# set) and as second parameter the domain of the output variable. Every method
# works that way -- and if you want to implement your own, use this signature.
# Notice that it is a simple function, not a class that is instantiated.
centroid = Centroid(mf, y) # Centroid method
bisec = Bisector(mf, y) # Bisection method
som = SmallestOfMaxima(mf, y) # Smallest of Maxima
lom = LargestOfMaxima(mf, y) # Largest of Maxima
mom = MeanOfMaxima(mf, y) # Mean of Maxima
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'defuzzification.png'.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8., 4.)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
ll = [ 0.0, 1.0 ]
a1.hold(True)
a1.plot([ centroid, centroid ], ll, linewidth = 1)
a1.plot([ bisec, bisec ], ll, linewidth = 1)
a1.plot([ som, som ], ll, linewidth = 1)
a1.plot([ lom, lom ], ll, linewidth = 1)
a1.plot([ mom, mom ], ll, linewidth = 1)
a1.plot(y, gn, 'k--')
a1.plot(y, pn, 'k--')
a1.plot(y, z, 'k--')
a1.plot(y, pp, 'k--')
a1.plot(y, gp, 'k--')
a1.fill(y, mf, 'gray')
a1.set_xlim([ -30, 30 ])
a1.set_ylim([ -0.1, 1.1 ])
a1.set_xticks(linspace(-30, 30, 7.0))
a1.set_yticks([ 0.0, 1.0 ])
a1.legend([ 'Centroid = %7.4f' % centroid,
'Bisector = %7.4f' % bisec,
'SOM = %7.4f' % som,
'LOM = %7.4f' % lom,
'MOM = %7.4f' % mom ])
savefig("defuzzification.png")
except ImportError:
print "Defuzzification results:"
print " Centroid = %7.4f" % centroid
print " Bisector = %7.4f" % bisec
print " SOM = %7.4f" % som
print " LOM = %7.4f" % lom
print " MOM = %7.4f" % mom | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/control-surface.py
# Generating the control surface for a two-variable fuzzy controller
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
from math import pi
import pylab as p
import mpl_toolkits.mplot3d as p3
# This tutorial shows how to work with a fuzzy-based controller. It is really
# easy to build a standard controller using Peach. We won't go into details of
# how a controller should work -- please, consult the literature on the subject,
# as it is very rich and explains the topic a lot better than we could do here.
#
# We will build a controller that could control an inverted pendulum. This
# controller works fine with simulations, but was never tested on a physical
# implementation. Nonetheless, it is a nice example of how to use a controller
# in Peach. We won't however, simulate it with a model of an inverted pendulum
# -- if you want to see such simulation at work, please give a look in the
# Inverted Pendulum demo.
#
# We will control the angular position and the angular velocity of the pendulum.
# To do that, we need to create the membership functions for each controlled
# variable. We will use five membership functions for the angular position
# (in general represented by theta): big negative (tbn), small negative (tsn),
# near zero (tz), small positive (tsp) and big positive (tbp). Also, we will use
# five membership functions for the angular velocity (in general represented by
# the greek letter omega): big negative (wbn), small negative (wsn), near zero
# (wz), small positive (wsp) and big positive (wbp). We define these functions
# below:
# Resolution of the control surface (Points x Points samples).
Points = 50
# Theta ranges from -pi to pi, angles given in radians. The five input
# membership functions below are kept as callables (no trailing call) because
# the controller fuzzifies crisp inputs through them.
theta = numpy.linspace(-pi, pi, Points)
tbn = DecreasingRamp(-pi/2.0, -pi/4.0)
tsn = Triangle(-pi/2.0, -pi/4.0, 0.0)
tz = Triangle(-pi/4.0, 0.0, pi/4.0)
tsp = Triangle(0.0, pi/4.0, pi/2.0)
tbp = IncreasingRamp(pi/4.0, pi/2.0)
# Omega ranges from -pi/2 to pi/2, given in radians per second.
omega = numpy.linspace(-pi/2.0, pi/2.0, Points)
wbn = DecreasingRamp(-pi/4.0, -pi/8.0)
wsn = Triangle(-pi/4.0, -pi/8.0, 0.0)
wz = Triangle(-pi/8.0, 0.0, pi/8.0)
wsp = Triangle(0.0, pi/8.0, pi/4.0)
wbp = IncreasingRamp(pi/8.0, pi/4.0)
# We also need to create membership functions to the output variable. In the
# case of the control of an inverted pendulum, this is the force applied to the
# chart. We will use, also, five membership functions, with naming similar to
# the ones above. F will range from -30 to 30 Newtons. In the case of this
# example, this range is very arbitrary, it should be adjusted for more specific
# cases. Unlike the input functions, these are evaluated over the domain ``f``
# (note the trailing ``(f)``), so each holds an array of membership degrees.
f = numpy.linspace(-30.0, 30.0, 500)
fbn = Triangle(-30.0, -20.0, -10.0)(f)
fsn = Triangle(-20.0, -10.0, 0.0)(f)
fz = Triangle(-10.0, 0.0, 10.0)(f)
fsp = Triangle(0.0, 10.0, 20.0)(f)
fbp = Triangle(10.0, 20.0, 30.0)(f)
# Now we create the controller and input the decision rules. Rules are tipically
# given in the form of a table, if there are two variables being controlled.
# A controller in Peach has a method, add_table, that allows to give all the
# decision rules in that form. Notice, however, that single variable controllers
# should use a different method to input the rules.
#
# In the case of add_table, there are three parameters: the first one is a list
# of membership functions for the first input variable and represent the rows
# of the table; the second is a list of membership functions for the second
# variable and represents the columns of the table; the last parameter is a list
# of list that makes the table itself -- its elements are the membership
# function corresponding to the consequent of the crossing of the row and the
# column.
#
# In this example, we will use the following table:
#
# | wbn | wsn | wz | wsp | wbp
# +-----+-----+-----+-----+-----
# tbn | fbn | fbn | fbn | fsn | fz
# tsn | fbn | fbn | fsn | fz | fsp
# tz | fbn | fsn | fz | fsp | fbp
# tsp | fsn | fz | fsp | fbp | fbp
# tbp | fz | fsp | fbp | fbp | fbp
#
# Here is what these rules mean:
#
# IF Theta is tbn AND Omega is wbn THEN f is fbn
# IF Theta is tbn AND Omega is wsn THEN f is fbn
# IF Theta is tbn AND Omega is wz THEN f is fbn
# IF Theta is tbn AND Omega is wsp THEN f is fsn
# IF Theta is tbn AND Omega is wbp THEN f is fz
#
# and so on.
# Controller over output domain ``f``, starting with no rules, defuzzifying
# with the centroid method.
c = Controller(f, [], Centroid)
# Rows are the theta functions, columns the omega functions; each table cell
# is the consequent membership function for that (theta, omega) pair.
c.add_table([ tbn, tsn, tz, tsp, tbp ], [ wbn, wsn, wz, wsp, wbp ],
            [ [ fbn, fbn, fbn, fsn, fz ],
              [ fbn, fbn, fsn, fz, fsp ],
              [ fbn, fsn, fz, fsp, fbp ],
              [ fsn, fz, fsp, fbp, fbp ],
              [ fz, fsp, fbp, fbp, fbp ] ] )
# This section of code generates the surface. This iterates over every point
# in the Theta and Omega intervals and calls the controller to receive the value
# of the output variable. That will be Points**2 samples, so it might take a
# while to compute.
fh = numpy.zeros((Points, Points))
for i in range(0, Points):
    for j in range(0, Points):
        # Map row index i to theta in [-pi, pi) and column index j to
        # omega in [-pi/2, pi/2), matching the ranges defined above.
        t = (i - Points/2.0) / (Points / 2.0) * pi
        w = (j - Points/2.0) / Points * pi
        fh[i, j] = c(t, w)
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'control-surface.png'.
fig = p.figure()
a1 = p3.Axes3D(fig)
theta, omega = numpy.meshgrid(theta, omega)
a1.plot_surface(theta, omega, fh)
p.savefig("control-surface.png")
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/custom-membership.py
# Using custom membership functions
################################################################################
# Please, for more information on this demo, see the tutorial documentation.
# First, we import the needed modules
from numpy import *
import peach as p
# Peach can work with custom membership functions, if you need to use them.
# There are a number of ways of doing that. Please, use this file as a template
# to create your own.
# An existing membership function can be customized during its instantiation.
# For example, you can set parameters of an increasing ramp, starting in x = -1,
# and ending in x = 1, you can use the simple command:
# First approach: instantiate an existing membership-function class directly.
CustomMembershipFunction1 = p.IncreasingRamp(-1., 1.)
# You can also create your membership function as a simple function, and turn it
# into a membership function. Let's use the ramp example as above. You can
# create a simple membership function like this:
def custom_ramp(x):
    """Scalar increasing ramp: 0 below x = -1, 1 above x = 1, linear between."""
    if x > 1.0:
        return 1.0
    if x < -1.0:
        return 0.0
    # Linear segment connecting (-1, 0) to (1, 1).
    return (x + 1.0) / 2.0
# Second approach: wrap the plain callable in the Membership class.
CustomMembershipFunction2 = p.Membership(custom_ramp)
# The last way to create a membership function is by subclassing Membership.
# To do that, you will have to implement the __init__ and __call__ methods. Use
# the code below (where we implement, again, a ramp) as a template:
class CustomMembershipFunction3(p.Membership):
    '''
    Third approach: a membership function built by subclassing ``Membership``.

    Implements the same increasing ramp as above: 0 below x = -1, 1 above
    x = 1, and linear in between.
    '''
    def __init__(self):
        '''
        We won't pass any parameter to the initializer of the class, since we
        don't want further customization.
        '''
        p.Membership.__init__(self)
    def __call__(self, x):
        '''
        The __call__ interface should receive a (vector of) scalar and return a
        scalar. Remember that activation functions should be able to deal with
        vectors, if needed, so using the ``numpy`` functions will really help!
        Please consult the numpy documentation to understand what ``select``
        does.
        '''
        s = select([ x < -1., x < 1. ], [ 0., (x+1.)/2. ], 1.)
        # BUGFIX: this module only star-imports numpy and imports peach as
        # ``p``, so the bare name ``FuzzySet`` was a NameError at call time;
        # it must be qualified with ``p.``.
        return p.FuzzySet(s)
# Notice that the __call__ interface should return a FuzzySet object!
# The functions thus generated can be used in any place where a membership
# function or a membership class would be used, such as in a controller. | Python |
# -*- coding: utf-8 -*-
#####################################################################
# Peach - Python para Inteligência Computacional
# José Alexandre Nalon
#
# Este arquivo: demo07.py
# Demonstração e teste, Mapeamento de uma função não linear.
#####################################################################
from numpy import *
import random
import peach as p
# Explanation of this demo.
#
# A neural network can be used to map a non-linear function, such as a
# sinusoid or similar. The technique requires a more complex neural
# network, with one input but a relatively large hidden layer. The output
# layer uses the identity as activation function, to sum the mappings
# performed by the hidden layer.
# Here we create the neural network. The activation of the output layer is
# the identity (Linear), and the learning method is back-propagation with
# learning rate 0.01.
# We use several outputs, equally distributed around the evaluation point,
# so that the error obtained is more significant where it exists. In this
# case, the evaluation point is int(inputs/2). Using a larger neighborhood
# gives better results.
inputs = 7
nn = p.FeedForward((inputs, 200, inputs), lrule=p.BackPropagation(0.01), bias=True)
nn.phi = (p.Sigmoid, p.Linear)
# Offsets of the neighborhood around the evaluation point.
delta = linspace(-0.1, 0.1, inputs)
elog = [ ]
error = 1
i = 0
while i < 2000:
    # Generate a value of x and the corresponding desired response. From x
    # we build xo + delta, the input vector of the neural network:
    xo = random.uniform(-1.0, 1.0)
    x = xo + delta
    d = sin(pi*x)
    # Make the prediction, compute the error and let the network learn.
    y = nn(x)
    error = nn.learn(x, d)
    elog.append(error)
    # Increment the iteration counter.
    i = i + 1
# If the matplotlib package is installed, the demo plots the original
# function against the predicted one. The plot is saved to demo07.eps;
# otherwise, the plotting step is skipped silently.
try:
    from matplotlib import *
    from matplotlib.pylab import *
    x = linspace(-1, 1, 200)
    y = sin(pi*x)
    ye = [ ]
    # Evaluate the network at each sample, keeping only the center output.
    for xo in x:
        yn = nn(delta + xo)
        ye.append(yn[int(inputs/2)])
    ye = array(ye)
    subplot(211)
    hold(True)
    grid(True)
    plot(x, y, 'b--')
    plot(x, ye, 'g')
    xlim([ -1, 1 ])
    legend([ "$y$", "$\hat{y}$" ])
    subplot(212)
    grid(True)
    # Error log, subsampled every 10 iterations.
    plot(arange(0, 2000, 10), array(elog, dtype=float)[::10])
    savefig("demo07.eps")
except ImportError:
    pass
| Python |
# -*- coding: utf-8 -*-
################################################################################
# Widget to draw the flock.
# Jose Alexandre Nalon
#
# Date: 22-10-2007
################################################################################
################################################################################
# Used modules
################################################################################
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
################################################################################
# Classes
################################################################################
class FlockView(QGraphicsView):
    """
    Visualization of flock.

    Renders the target spot (red) and one dot per bird (white) on a
    QGraphicsScene, mapping real-world coordinates in roughly the unit
    square to screen pixels.
    """
    def __init__(self, spot, flock, *cnf):
        # ``spot`` is a 2-element coordinate; ``flock`` is a sequence of
        # 2-element coordinates, one per bird.
        QGraphicsView.__init__(self, *cnf)
        self.__xsize = self.width() - 150
        self.__ysize = self.height() - 150
        self.__set_scale()
        self.__create_gs()
        self.__create_objects(len(flock))
        self.__spot = spot
        self.__flock = flock
        self.set_state(spot, flock)
        self.show()
    def __set_scale(self):
        # World window shown on screen: [-0.1, 1.1] in both directions.
        xmin = -0.1
        ymin = -0.1    # NOTE(review): assigned but never used below
        xmax = 1.1
        ymax = 1.1
        # Screen y grows downwards, hence the negative slope; __by places
        # world ymax at screen 0.
        self.__ay = - float(self.__ysize) / (ymax - ymin)
        self.__by = - self.__ay * ymax
        self.__ax = - self.__ay # Same scale for x-axis
        self.__bx = - self.__ax * xmin
    def __create_gs(self):
        # Black-background scene matching the current widget size.
        self.gs = QGraphicsScene(0, 0, self.__xsize, self.__ysize)
        self.gs.setBackgroundBrush(QBrush(QColor(0, 0, 0)))
        self.setScene(self.gs)
    def __create_objects(self, n_points):
        # __dots[0] is the target spot; __dots[1:] are the birds.
        self.__dots = [ ]
        # Draws the spot to where the birds must converge
        s_pen = QPen(QColor(192, 0, 0), 2)
        s_brush = QBrush(QColor(192, 0, 0))
        s = self.gs.addEllipse(QRectF(0, 0, 1, 1), s_pen, s_brush)
        s.setZValue(300)
        s.show()
        self.__dots.append(s)
        # Draws one small circle for the flock
        b_pen = QPen(QColor(255, 255, 255), 2)
        b_brush = QBrush(QColor(255, 255, 255))
        for i in range(n_points):
            b = self.gs.addEllipse(QRectF(0, 0, 1, 1), b_pen, b_brush)
            b.setZValue(301+i)
            b.show()
            self.__dots.append(b)
    def set_state(self, spot, flock):
        # Moves every graphics item to the given world coordinates.
        self.__spot = spot
        self.__flock = flock
        radius = 5
        # Updates the spot
        sx, sy = self.__transform(spot[0], spot[1])
        self.__dots[0].setRect(sx, sy, radius, radius)
        # Updates birds in the flock
        radius = 5
        for p, b in zip(flock, self.__dots[1:]):
            fx, fy = self.__transform(p[0], p[1])
            b.setRect(fx, fy, radius, radius)
    def __transform(self, x, y):
        '''
        Transforms a pair of real world coordinates to screen coordinates.
        '''
        xr = int(self.__ax * x + self.__bx)
        yr = int(self.__ay * y + self.__by)
        return (xr, yr)
    def resizeEvent(self, event):
        # Recompute the scale for the new widget size and redraw.
        self.__xsize = event.size().width()
        self.__ysize = event.size().height()
        self.__set_scale()
        self.set_state(self.__spot, self.__flock)
################################################################################
| Python |
# -*- coding: utf-8 -*-
################################################################################
# A Demonstration of Swarm in Action
# Jose Alexandre Nalon
#
# Date: 22-10-2007
# This is the main program
################################################################################
# Obs.: Since this is a standalone program, we do not document using the
# epydoc API. So, no documentation for classes, etc. This program is
# documented, however, using a Sphinx interface. Please, consult the
# standard module documentation.
################################################################################
# Used modules
################################################################################
# We use the PyQt4 toolkit
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
from time import sleep
from qtflock import *
from peach import *
from numpy.random import random
################################################################################
# Classes
################################################################################
class ControlFrame(QGroupBox):
    '''
    This frame shows the application control buttons.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Control:")
        self.go_button = QPushButton("Start", self)
        self.stop_button = QPushButton("Stop", self)
        self.step_button = QPushButton("Step", self)
        self.change_button = QPushButton("Change Spot", self)
        self.reset_button = QPushButton("Reset", self)
        self.delay_label = QLabel("Delay (ms):", self)
        self.delay_edit = QLineEdit(self)
        self.delay_edit.setText('100')
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.addWidget(self.go_button, Qt.AlignLeft)
        layout.addWidget(self.stop_button, Qt.AlignLeft)
        layout.addWidget(self.step_button, Qt.AlignLeft)
        layout.addWidget(self.change_button, Qt.AlignLeft)
        layout.addWidget(self.reset_button, Qt.AlignLeft)
        layout.addWidget(self.delay_label, Qt.AlignLeft)
        layout.addWidget(self.delay_edit, Qt.AlignLeft)
        self.enable()
        self.show()
    def enable(self):
        # Idle state: everything but Stop is available.
        # NOTE(review): change_button is not toggled by enable()/disable(),
        # so "Change Spot" stays clickable even while the simulation runs --
        # confirm whether this is intentional.
        self.go_button.setEnabled(True)
        self.stop_button.setEnabled(False)
        self.step_button.setEnabled(True)
        self.reset_button.setEnabled(True)
        self.delay_edit.setEnabled(True)
    def disable(self):
        # Running state: only Stop is available.
        self.go_button.setEnabled(False)
        self.stop_button.setEnabled(True)
        self.step_button.setEnabled(False)
        self.reset_button.setEnabled(False)
        self.delay_edit.setEnabled(False)
################################################################################
# Function to represent the searched spot. Implemented in the form of a class
# to allow dynamic change of parameters.
class Function(object):
    '''
    Objective function for the swarm: squared distance from ``center``,
    shifted down by ``offset``. Implemented as a class so the GUI can move
    the target and change the offset on the fly.
    '''
    def __init__(self):
        # Default target; overwritten by the application when the spot moves.
        self.center = array([ [ 0.5 ], [ 0.5 ] ])
        self.offset = 0.
    def __call__(self, x):
        diff = x - self.center
        return sum(diff * diff) - self.offset
################################################################################
class FlockFrame(QFrame):
    '''
    Shows every control and process events.

    Hosts the flock view and the control frame, owns the particle swarm
    optimizer and drives the simulation loop.
    '''
    def __init__(self, app, *cnf):
        # Optimizer initialization
        self.__fn = Function()
        self.__spot = random((2, ))
        self.__fn.center = self.__spot
        flock = random((10, 2)) # First estimates
        ranges = [ (0., 1.), (0., 1.) ]
        self.pso = ParticleSwarmOptimizer(self.__fn, flock, ranges)
        self.running = False
        self.count = 0
        # Frame initialization
        QFrame.__init__(self, *cnf)
        self.app = app
        self.setWindowTitle("Flock of Birds")
        # Graphic Elements
        self.flock_view = FlockView(self.__spot, flock)
        self.ctrl_frame = ControlFrame(self)
        layout = QGridLayout(self)
        layout.addWidget(self.flock_view, 0, 0, 2, 1)
        layout.addWidget(self.ctrl_frame, 0, 1)
        layout.setRowStretch(0, 0)
        layout.setRowStretch(1, 1)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 0)
        # Connects the events
        self.connect(self.ctrl_frame.go_button, SIGNAL("clicked()"), self.on_go_button)
        self.connect(self.ctrl_frame.stop_button, SIGNAL("clicked()"), self.on_stop_button)
        self.connect(self.ctrl_frame.step_button, SIGNAL("clicked()"), self.on_step_button)
        self.connect(self.ctrl_frame.change_button, SIGNAL("clicked()"), self.on_change_button)
        self.connect(self.ctrl_frame.reset_button, SIGNAL("clicked()"), self.on_reset_button)
        self.show()
    def enable(self):
        # Delegate to the control frame (idle state).
        self.ctrl_frame.enable()
    def disable(self):
        # Delegate to the control frame (running state).
        self.ctrl_frame.disable()
    def set_state(self, spot, flock):
        # Forward the new positions to the view.
        self.flock_view.set_state(spot, flock)
    def reset(self):
        # New random target; raising the offset each reset lowers the
        # objective's minimum value.
        self.__spot = random((2, ))
        self.__fn.center = self.__spot
        self.__fn.offset = self.__fn.offset + 0.25
        flock = random((10, 2))
        #self.pso.reset(flock)
        # Overwrite the particle positions in place with fresh estimates.
        self.pso[:] = flock[:]
        self.set_state(self.__spot, flock)
    def step(self):
        # One optimizer iteration; every 73rd step the problem is reset.
        self.pso.step()
        self.count = self.count + 1
        if self.count % 73 == 0:
            self.reset()
        else:
            self.set_state(self.__spot, self.pso[:])
    def on_go_button(self):
        # Busy-wait loop: step, let Qt process events, then sleep for the
        # user-selected delay (milliseconds converted to seconds).
        self.disable()
        self.running = True
        try:
            delay = int(self.ctrl_frame.delay_edit.text()) / 1000.
        except ValueError:
            delay = 0
        while self.running:
            self.step()
            self.app.processEvents()
            sleep(delay)
        self.enable()
    def on_stop_button(self):
        # Makes the loop in on_go_button fall through.
        self.running = False
    def on_step_button(self):
        if self.running:
            return
        self.step()
    def on_change_button(self):
        # Move the target without resetting the swarm.
        self.__spot = random((2, ))
        self.__fn.center = self.__spot
        self.set_state(self.__spot, self.pso[:])
    def on_reset_button(self):
        if self.running:
            return
        self.reset()
    def closeEvent(self, event):
        # Stop the loop before quitting the application.
        self.on_stop_button()
        self.app.exit(0)
################################################################################
# Main Program
################################################################################
if __name__ == "__main__":
    # Create the Qt application, the main frame and run the event loop.
    q = QApplication([])
    f = FlockFrame(q, None)
    q.exec_()
| Python |
# -*- coding: utf-8 -*-
################################################################################
# General graph plotting widget for the PyQt4 toolkit
# Some of this was taken from a page that I didn't register the link to.
# probably the Qwt page itself.
# Jose Alexandre Nalon
#
# Date: 28-01-2008
# Graphic plotting
################################################################################
################################################################################
# Used modules
################################################################################
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import QEvent
import PyQt4.Qwt5 as Qwt
# NumPy is essential to plotting.
from numpy import *
################################################################################
# Classes
################################################################################
class LRPlotWindow(Qwt.QwtPlot):
    '''
    Plot window for the linear-regression demo: a regression line, the
    scatter of observed points, and a highlighted current example.
    '''
    def __init__(self, xlim, ylim, *args):
        '''
        Initializes the graph plotting. The usual parameters are available.

        :Parameters:
          xlim
            Tuple (xmin, xmax) with the horizontal axis boundaries.
          ylim
            Tuple (ymin, ymax) with the vertical axis boundaries.
        '''
        Qwt.QwtPlot.__init__(self, *args)
        self.set_scale(xlim, ylim)
        self.setCanvasBackground(Qt.white)
        grid = Qwt.QwtPlotGrid()
        grid.attach(self)
        grid.setMajPen(QPen(Qt.black, 0, Qt.DotLine))
        # Regression line: plain dark-yellow curve, no symbols.
        self.lr = Qwt.QwtPlotCurve('')
        self.lr.attach(self)
        self.lr.setPen(QPen(Qt.darkYellow))
        self.lr.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
        # Scatter of observed points: symbols only (pen set to NoPen below).
        scatter_symbol = Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
            QBrush(Qt.white), QPen(Qt.darkCyan), QSize(9, 9))
        self.scatter = Qwt.QwtPlotCurve('')
        self.scatter.attach(self)
        self.scatter.setPen(QPen(Qt.NoPen))
        self.scatter.setSymbol(scatter_symbol)
        self.scatter.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
        # Current example: a single highlighted red symbol.
        example_symbol = Qwt.QwtSymbol(Qwt.QwtSymbol.Ellipse,
            QBrush(Qt.red), QPen(Qt.red), QSize(9, 9))
        self.example = Qwt.QwtPlotCurve('')
        self.example.attach(self)
        self.example.setPen(QPen(Qt.NoPen))
        self.example.setSymbol(example_symbol)
        self.example.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
    def set_scale(self, xlim, ylim):
        '''
        Set boundaries of the plots
        '''
        self.xmin, self.xmax = xlim
        self.ymin, self.ymax = ylim
        # Qwt axis ids: 2 is xBottom, 0 is yLeft.
        self.setAxisScale(2, self.xmin, self.xmax)
        self.setAxisScale(0, self.ymin, self.ymax)
    def setLRData(self, x, y):
        '''
        Sets data on the line plot

        :Parameters:
          x
            horizontal data
          y
            vertical data
        '''
        x = array(x)
        y = array(y)
        self.lr.setData(x, y)
        #self.replot()
    def setScatterData(self, x, y):
        '''
        Sets data on the scatter plot

        :Parameters:
          x
            horizontal data
          y
            vertical data
        '''
        x = array(x)
        y = array(y)
        self.scatter.setData(x, y)
        #self.replot()
    def setExampleData(self, x, y):
        '''
        Sets data on the example plot

        :Parameters:
          x
            horizontal data (scalar; duplicated to form a curve)
          y
            vertical data (scalar; duplicated to form a curve)
        '''
        x = array([ x, x ])
        y = array([ y, y ])
        self.example.setData(x, y)
        #self.replot()
################################################################################
class PlotWindow(Qwt.QwtPlot):
    '''
    Simple single-curve plot window; the horizontal axis is the sample
    index of the data passed to ``setData``.
    '''
    def __init__(self, *args):
        '''
        Initializes the graph plotting. The usual QwtPlot positional
        parameters are forwarded unchanged.
        '''
        Qwt.QwtPlot.__init__(self, *args)
        self.setCanvasBackground(Qt.white)
        grid = Qwt.QwtPlotGrid()
        grid.attach(self)
        grid.setMajPen(QPen(Qt.black, 0, Qt.DotLine))
        self.curve = Qwt.QwtPlotCurve('')
        self.curve.attach(self)
        self.curve.setPen(QPen(Qt.darkYellow))
        self.curve.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
    def setData(self, y):
        '''
        Sets data on the plots

        :Parameters:
          y
            Vertical data; plotted against its own indices.
        '''
        x = arange(0, len(y))
        y = array(y)
        self.curve.setData(x, y)
        self.replot()
################################################################################
| Python |
# -*- coding: utf-8 -*-
################################################################################
# Simulation of linear regression using a single linear neuron
# Jose Alexandre Nalon
#
# Date: 14-11-2011
# This is the main program
################################################################################
# Obs.: Since this is a standalone program, we do not document using the
# epydoc API. So, no documentation for classes, etc. This program is
# documented, however, using a Sphinx interface. Please, consult the
# standard module documentation.
################################################################################
# Used modules
################################################################################
# We use the PyQt4 toolkit
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy.random import uniform, standard_normal
from plot import *
from peach import *
import time
################################################################################
# Classes
################################################################################
class ControlFrame(QGroupBox):
    '''
    This frame shows the application control buttons.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Control:")
        self.go_button = QPushButton("Start", self)
        self.stop_button = QPushButton("Stop", self)
        self.step_button = QPushButton("Step", self)
        self.delay_label = QLabel(u"Delay: ", self)
        # Delay spin box: 0..10000 ms, default 1000 ms.
        self.delay_mag = QSpinBox(self)
        self.delay_mag.setMinimum(0)
        self.delay_mag.setMaximum(10000)
        self.delay_mag.setValue(1000)
        self.delay_mag.setSuffix(" ms")
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.addWidget(self.go_button, Qt.AlignLeft)
        layout.addWidget(self.stop_button, Qt.AlignLeft)
        layout.addWidget(self.step_button, Qt.AlignLeft)
        layout.addWidget(self.delay_label, Qt.AlignLeft)
        layout.addWidget(self.delay_mag, Qt.AlignLeft)
        self.enable()
        self.show()
    def enable(self):
        # Idle state: everything but Stop is available.
        self.go_button.setEnabled(True)
        self.stop_button.setEnabled(False)
        self.step_button.setEnabled(True)
        self.delay_mag.setEnabled(True)
    def disable(self):
        # Running state: only Stop is available.
        self.go_button.setEnabled(False)
        self.stop_button.setEnabled(True)
        self.step_button.setEnabled(False)
        self.delay_mag.setEnabled(False)
    def get_delay(self):
        # Current delay in milliseconds, as an int.
        return self.delay_mag.value()
################################################################################
class RedefFrame(QGroupBox):
    '''
    This frame shows controls to reset and redefine the variables on the
    control.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Reset:")
        self.a0_label = QLabel("a0: ")
        self.a1_label = QLabel("a1: ")
        self.lrate_label = QLabel("Learning Rate: ")
        self.a0_edit = QLineEdit(self)
        self.a1_edit = QLineEdit(self)
        self.lrate_edit = QLineEdit(self)
        self.redef_button = QPushButton("Reset", self)
        layout = QGridLayout(self)
        layout.setSpacing(0)
        layout.addWidget(self.a0_label, 0, 0, 1, 1)
        layout.addWidget(self.a1_label, 1, 0, 1, 1)
        layout.addWidget(self.lrate_label, 2, 0, 1, 1)
        layout.addWidget(self.a0_edit, 0, 1, 1, 1)
        layout.addWidget(self.a1_edit, 1, 1, 1, 1)
        layout.addWidget(self.lrate_edit, 2, 1, 1, 1)
        layout.addWidget(self.redef_button, 3, 0, 1, 2)
        self.enable()
        self.show()
    def enable(self):
        self.a0_edit.setEnabled(True)
        self.a1_edit.setEnabled(True)
        self.lrate_edit.setEnabled(True)
        self.redef_button.setEnabled(True)
    def disable(self):
        self.a0_edit.setEnabled(False)
        self.a1_edit.setEnabled(False)
        self.lrate_edit.setEnabled(False)
        self.redef_button.setEnabled(False)
    def feedback(self, a0, a1, lrate):
        # Echo the current parameter values back into the edit boxes.
        self.a0_edit.setText("%7.4f" % a0)
        self.a1_edit.setText("%7.4f" % a1)
        self.lrate_edit.setText("%7.4f" % lrate)
    def get_values(self):
        # Parse the user-entered values. NOTE(review): float() raises
        # ValueError on non-numeric text -- callers are expected to handle
        # it; confirm against the caller.
        a0 = float(self.a0_edit.text())
        a1 = float(self.a1_edit.text())
        lrate = float(self.lrate_edit.text())
        return (a0, a1, lrate)
################################################################################
class ExampleFrame(QGroupBox):
    '''
    This frame shows the example being presented to the neuron at each step.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Example:")
        self.x_label = QLabel("x: ")
        self.y_label = QLabel("y: ")
        self.x_edit = QLineEdit(self)
        self.y_edit = QLineEdit(self)
        # One row per coordinate: label on column 0, value on column 1.
        grid = QGridLayout(self)
        grid.setSpacing(0)
        rows = [ (self.x_label, self.x_edit),
                 (self.y_label, self.y_edit) ]
        for row, (label, edit) in enumerate(rows):
            grid.addWidget(label, row, 0, 1, 1)
            grid.addWidget(edit, row, 1, 1, 1)
        # The fields only display values; they are never editable.
        self.x_edit.setEnabled(False)
        self.y_edit.setEnabled(False)
        self.show()
    def feedback(self, x, y):
        '''Displays the coordinates of the current example.'''
        self.x_edit.setText("%7.4f" % x)
        self.y_edit.setText("%7.4f" % y)
################################################################################
class ModelFrame(QGroupBox):
    '''
    This frame shows the parameters obtained by presenting examples from the
    training set.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Model:")
        self.w0_label = QLabel("w0: ")
        self.w1_label = QLabel("w1: ")
        self.y_label = QLabel("Estimate: ")
        self.e_label = QLabel("Error: ")
        self.w0_edit = QLineEdit(self)
        self.w1_edit = QLineEdit(self)
        self.y_edit = QLineEdit(self)
        self.e_edit = QLineEdit(self)
        # One row per quantity: label on column 0, value on column 1.
        grid = QGridLayout(self)
        grid.setSpacing(0)
        rows = [ (self.w0_label, self.w0_edit),
                 (self.w1_label, self.w1_edit),
                 (self.y_label, self.y_edit),
                 (self.e_label, self.e_edit) ]
        for row, (label, edit) in enumerate(rows):
            grid.addWidget(label, row, 0, 1, 1)
            grid.addWidget(edit, row, 1, 1, 1)
        # Display-only fields.
        for edit in (self.w0_edit, self.w1_edit, self.y_edit, self.e_edit):
            edit.setEnabled(False)
        self.show()
    def feedback(self, w0, w1, y, e):
        '''Shows the current weights, the estimate and the error.'''
        for edit, value in ((self.w0_edit, w0), (self.w1_edit, w1),
                            (self.y_edit, y), (self.e_edit, e)):
            edit.setText("%7.4f" % value)
################################################################################
class LRFrame(QFrame):
    '''
    Shows every control and process events.

    Main window of the linear regression demo: the plot tabs sit on the
    left and the control/reset/example/model frames on the right. The
    training loop runs inside on_go_button, pumping Qt events manually.
    '''
    def __init__(self, app, *cnf):
        '''
        Builds the window and wires the button events.
        :Parameters:
          app
            The QApplication instance; used to process events while the
            simulation loop is running and to exit on close.
        '''
        # Coefficients of the model
        self.a0 = -0.5
        self.a1 = 0.75
        self.lrate = 0.5
        # Neuron. The learning parameter is made huge to make more obvious the
        # updating of the neuron.
        self.nn = FeedForward((1, 1), lrule=LMS(self.lrate), bias=True)
        # History of training errors and of presented examples.
        self.error_log = [ ]
        self.x_log = [ ]
        self.y_log = [ ]
        # Control
        self.running = False
        # Frame Inicialization
        QFrame.__init__(self, *cnf)
        self.app = app
        self.setWindowTitle("Linear Regression")
        # Graphic Elements
        self.graph = LRPlotWindow((-1., 1.), (self.a0-self.a1, self.a0+self.a1))
        self.error = PlotWindow()
        self.ctrl_frame = ControlFrame(self)
        self.redef_frame = RedefFrame(self)
        self.example_frame = ExampleFrame(self)
        self.model_frame = ModelFrame(self)
        # Tabs
        self.tabs = QTabWidget()
        self.tabs.addTab(self.graph, 'Model')
        self.tabs.addTab(self.error, 'Error')
        # Tabs on column 0 (stretchable); control frames stacked on column 1.
        layout = QGridLayout(self)
        layout.addWidget(self.tabs, 0, 0, 5, 1)
        layout.addWidget(self.ctrl_frame, 0, 1)
        layout.addWidget(self.redef_frame, 1, 1)
        layout.addWidget(self.example_frame, 2, 1)
        layout.addWidget(self.model_frame, 3, 1)
        layout.setRowStretch(0, 0)
        layout.setRowStretch(1, 0)
        layout.setRowStretch(2, 0)
        layout.setRowStretch(3, 0)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 0)
        # Connects the events
        self.connect(self.ctrl_frame.go_button, SIGNAL("clicked()"), self.on_go_button)
        self.connect(self.ctrl_frame.stop_button, SIGNAL("clicked()"), self.on_stop_button)
        self.connect(self.ctrl_frame.step_button, SIGNAL("clicked()"), self.on_step_button)
        self.connect(self.redef_frame.redef_button, SIGNAL("clicked()"), self.on_redef_button)
        # Shows the frame with the initial (untrained) state of the neuron.
        w0, w1 = self.nn[0].weights[0]
        self.redef_frame.feedback(self.a0, self.a1, self.lrate)
        self.example_frame.feedback(0, 0)
        self.model_frame.feedback(w0, w1, 0, 0)
        self.show()
    def enable(self):
        '''Re-enables the control and reset frames (simulation idle).'''
        self.ctrl_frame.enable()
        self.redef_frame.enable()
    def disable(self):
        '''Disables the control and reset frames (simulation running).'''
        self.ctrl_frame.disable()
        self.redef_frame.disable()
    def feedback(self):
        '''
        Updates every visual element from the current neuron weights and the
        last presented example. Must be called after at least one example
        has been appended to the logs.
        '''
        w0, w1 = self.nn[0].weights[0]
        x = self.x_log[-1]
        y = self.y_log[-1]
        # Estimate of the neuron for the last example.
        ye = self.nn(array([ x ]))
        self.example_frame.feedback(x, y)
        self.model_frame.feedback(w0, w1, ye, y-ye)
        # Regression line implied by the present weights, over x in [-1, 1].
        self.graph.setLRData([ -1., 1. ], [ w0-w1, w0+w1 ])
        if len(self.x_log) > 0:
            self.graph.setExampleData(self.x_log[-1], self.y_log[-1])
            self.graph.replot()
        if len(self.x_log) > 1:
            self.graph.setScatterData(self.x_log, self.y_log)
            self.graph.replot()
        self.error.setData(array(self.error_log))
        self.error.replot()
    def step(self):
        '''Generates one noisy example and presents it to the neuron.'''
        x = uniform(-1., 1.)            # Generates an example
        y = self.a0 + self.a1*x         # Line equation
        y = y + 0.05*standard_normal()  # Adds noise
        self.x_log.append(x)
        self.y_log.append(y)
        self.feedback()                 # Feedback made before updating the neuron
        self.error_log.append(self.nn.feed(array([ x ]), y))
    def on_go_button(self):
        '''
        Runs the simulation loop until on_stop_button clears self.running.
        Qt events are pumped manually so the GUI remains responsive.
        '''
        self.disable()
        self.running = True
        # The delay control value is interpreted as milliseconds.
        delay = self.ctrl_frame.get_delay() / 1000.0
        while self.running:
            self.step()
            self.app.processEvents()
            time.sleep(delay)
        self.enable()
    def on_stop_button(self):
        '''Flags the running loop to stop after the current step.'''
        self.running = False
    def on_step_button(self):
        '''Executes a single simulation step (ignored while running).'''
        if self.running:
            return
        self.step()
    def on_redef_button(self):
        '''Reads new model parameters and rescales the plot accordingly.'''
        if self.running:
            return
        self.a0, self.a1, self.lrate = self.redef_frame.get_values()
        # Vertical range follows the sign of the slope; a1 == 0 keeps the
        # previous scale.
        if self.a1 > 0:
            self.graph.set_scale((-1., 1.), (self.a0-self.a1, self.a0+self.a1))
        elif self.a1 < 0:
            self.graph.set_scale((-1., 1.), (self.a0+self.a1, self.a0-self.a1))
        self.graph.replot()
        # Not possible to change the learning rate right now. Going to TODO.
    def closeEvent(self, event):
        '''Stops the simulation loop and quits the application.'''
        self.on_stop_button()
        self.app.exit(0)
################################################################################
# Main Program
################################################################################
if __name__ == "__main__":
    # Create the Qt application and hand it to the main window.
    application = QApplication([])
    window = LRFrame(application, None)
    application.exec_()
| Python |
# -*- coding: utf-8 -*-
################################################################################
# General graph plotting widget for the PyQt4 toolkit
# Some of this was adapted from a page whose link I did not record --
# probably the Qwt documentation itself.
# Jose Alexandre Nalon
#
# Date: 28-01-2008
# Graphic plotting
################################################################################
################################################################################
# Used modules
################################################################################
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qt import QEvent
import PyQt4.Qwt5 as Qwt
# NumPy is essential to plotting.
from numpy import *
################################################################################
# Classes
################################################################################
class PlotWindow(Qwt.QwtPlot):
    '''
    General purpose plot window holding a fixed number of curves over a
    white canvas with a dotted grid.
    '''
    def __init__(self, nplots, *args):
        '''
        Initializes the graph plotting. The usual parameters are available.
        :Parameters:
          nplots
            Number of plots in the same window.
        '''
        Qwt.QwtPlot.__init__(self, *args)
        self.setCanvasBackground(Qt.white)
        grid = Qwt.QwtPlotGrid()
        grid.attach(self)
        grid.setMajPen(QPen(Qt.black, 0, Qt.DotLine))
        self.__nplots = nplots
        self.__curves = [ ]
        # Curve colors cycle through this palette when nplots > 6.
        colors = [ Qt.red, Qt.darkCyan, Qt.green, Qt.darkYellow, Qt.cyan, Qt.magenta ]
        for i in xrange(nplots):
            new_curve = Qwt.QwtPlotCurve('')
            new_curve.attach(self)
            new_curve.setPen(QPen(colors[i%6]))
            new_curve.setRenderHint(Qwt.QwtPlotItem.RenderAntialiased)
            self.__curves.append(new_curve)
    def setCurveColor(self, i, color):
        '''
        Sets the color of a given plot.
        '''
        self.__curves[i].setPen(QPen(color))
    def setCurveStyle(self, i, estilo):
        '''
        Sets the pen style (e.g. Qt.DotLine) of a given plot.
        '''
        # BUG FIX: QwtPlotCurve.pen() returns a copy of the pen, so setting
        # the style on the returned object was silently dropped. Build a new
        # pen from the current one and install it explicitly with setPen().
        pen = QPen(self.__curves[i].pen())
        pen.setStyle(estilo)
        self.__curves[i].setPen(pen)
    def setCurveBaseline(self, i, ref):
        '''
        Sets the baseline of a given plot.
        '''
        self.__curves[i].setBaseline(ref)
    def setCurveBrush(self, i, brush):
        '''
        Sets the brush of a given plot.
        '''
        self.__curves[i].setBrush(brush)
    def setData(self, i, x, y):
        '''
        Plots the x, y data in the ith plot.
        :Parameters:
          i
            Number of the plot to be drawn.
          x
            Horizontal coordinates
          y
            Vertical coordinates
        '''
        x = array(x)
        y = array(y)
        self.__curves[i].setData(x, y)
        self.replot()
    def setMultiData(self, xy):
        '''
        Plots the data in the set.
        :Parameters:
          xy
            List of two-tuples, where xy[:, 0] is the horizontal coordinate, and
            xy[:, 1] is the vertical coordinate.
        :Raises ValueError:
          If the number of datasets differs from the number of curves.
        '''
        n = len(xy)
        if n != self.__nplots:
            # Call form of raise keeps this valid under Python 2 and 3.
            raise ValueError("data and plots not equal")
        for i in xrange(n):
            x = array(xy[i][0])
            y = array(xy[i][1])
            self.__curves[i].setData(x, y)
        self.replot()
################################################################################
| Python |
# -*- coding: utf-8 -*-
################################################################################
# A Complete Simulation of an Inverted Pendulum, controlled with a
# Fuzzy Logic Controller
# Jose Alexandre Nalon
#
# Date: 06-12-2007
# This is the main program
################################################################################
# Obs.: Since this is a standalone program, we do not document using the
# epydoc API. So, no documentation for classes, etc. This program is
# documented, however, using a Sphinx interface. Please, consult the
# standard module documentation.
################################################################################
# Used modules
################################################################################
# We use the PyQt4 toolkit
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ip
from qtip import *
from plot import *
from peach import *
################################################################################
# Classes
################################################################################
class ControlFrame(QGroupBox):
    '''
    This frame shows the application control buttons.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Control:")
        self.go_button = QPushButton("Start", self)
        self.stop_button = QPushButton("Stop", self)
        self.step_button = QPushButton("Step", self)
        # Stack the buttons vertically with no spacing between them.
        box = QVBoxLayout(self)
        box.setSpacing(0)
        for button in (self.go_button, self.stop_button, self.step_button):
            box.addWidget(button, Qt.AlignLeft)
        self.enable()
        self.show()
    def enable(self):
        '''Idle state: the simulation can be started or stepped.'''
        for button, state in ((self.go_button, True),
                              (self.stop_button, False),
                              (self.step_button, True)):
            button.setEnabled(state)
    def disable(self):
        '''Running state: only the Stop button remains active.'''
        for button, state in ((self.go_button, False),
                              (self.stop_button, True),
                              (self.step_button, False)):
            button.setEnabled(state)
################################################################################
class RedefFrame(QGroupBox):
    '''
    This frame shows controls to reset and redefine the variables on the
    control: the pendulum angle and angular speed, and the cart position
    and speed.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Reset:")
        self.theta_label = QLabel("Theta: ")
        self.omega_label = QLabel("Omega: ")
        self.x_label = QLabel("Position: ")
        self.v_label = QLabel("Speed: ")
        self.theta_edit = QLineEdit(self)
        self.omega_edit = QLineEdit(self)
        self.x_edit = QLineEdit(self)
        self.v_edit = QLineEdit(self)
        self.redef_button = QPushButton("Reset", self)
        # One row per state variable, with the reset button spanning the
        # last row.
        grid = QGridLayout(self)
        grid.setSpacing(0)
        rows = [ (self.theta_label, self.theta_edit),
                 (self.omega_label, self.omega_edit),
                 (self.x_label, self.x_edit),
                 (self.v_label, self.v_edit) ]
        for row, (label, edit) in enumerate(rows):
            grid.addWidget(label, row, 0, 1, 1)
            grid.addWidget(edit, row, 1, 1, 1)
        grid.addWidget(self.redef_button, 4, 0, 1, 2)
        self.enable()
        self.show()
    def __fields(self):
        '''Every widget toggled by enable/disable.'''
        return (self.theta_edit, self.omega_edit, self.x_edit, self.v_edit,
                self.redef_button)
    def enable(self):
        '''Enables the edit fields and the reset button.'''
        for widget in self.__fields():
            widget.setEnabled(True)
    def disable(self):
        '''Disables the edit fields and the reset button.'''
        for widget in self.__fields():
            widget.setEnabled(False)
    def feedback(self, O, w, x, v, F):
        '''
        Shows the present pendulum state. Theta is displayed in degrees.
        The force F is accepted for interface symmetry but not displayed.
        '''
        self.theta_edit.setText("%5.2f" % (O*180./pi))
        self.omega_edit.setText("%7.4f" % w)
        self.x_edit.setText("%7.4f" % x)
        self.v_edit.setText("%7.4f" % v)
    def get_values(self):
        '''Returns (theta, omega, x, v); theta converted back to radians.'''
        O = float(self.theta_edit.text()) * pi/180.
        w = float(self.omega_edit.text())
        x = float(self.x_edit.text())
        v = float(self.v_edit.text())
        return (O, w, x, v)
################################################################################
class ConfigFrame(QGroupBox):
    '''
    This frame shows the redefinitions allowed for the controller. You can
    select different defuzzification, logic or inference operations.
    '''
    def __init__(self, *cnf):
        QGroupBox.__init__(self, *cnf)
        self.setTitle("Configuration:")
        self.logic_label = QLabel("Fuzzy Logic:")
        self.logic_combo = QComboBox(self)
        self.logic_combo.addItems([ "Zadeh", "Probabilistic", "Einstein",
            "Drastic" ])
        self.infer_label = QLabel("Inference:")
        self.infer_combo = QComboBox(self)
        self.infer_combo.addItems([ "Mamdani", "Probabilistic", "Zadeh/Mamdani",
            "Dienes-Rescher/Mamdani", "Lukasiewicz/Mamdani", "Godel/Mamdani" ])
        self.defuzzy_label = QLabel("Defuzzification:")
        self.defuzzy_combo = QComboBox(self)
        self.defuzzy_combo.addItems([ "Centroid", "Bisector", "SOM", "LOM", "MOM" ])
        # Labels on column 0, combos on column 1, one row per option.
        layout = QGridLayout(self)
        layout.setSpacing(0)
        layout.addWidget(self.logic_label, 0, 0)
        layout.addWidget(self.logic_combo, 0, 1)
        # BUG FIX: the inference widgets were added without grid coordinates,
        # so they were not placed on row 1 alongside the other option rows.
        layout.addWidget(self.infer_label, 1, 0)
        layout.addWidget(self.infer_combo, 1, 1)
        layout.addWidget(self.defuzzy_label, 2, 0)
        layout.addWidget(self.defuzzy_combo, 2, 1)
        self.enable()
        self.show()
    def enable(self):
        '''Makes every configuration combo selectable.'''
        self.logic_combo.setEnabled(True)
        self.infer_combo.setEnabled(True)
        self.defuzzy_combo.setEnabled(True)
    def disable(self):
        '''Locks the configuration while the simulation is running.'''
        self.logic_combo.setEnabled(False)
        self.infer_combo.setEnabled(False)
        self.defuzzy_combo.setEnabled(False)
################################################################################
class IPFrame(QFrame):
    '''
    Shows every control and process events.

    Main window of the inverted pendulum demo: the pendulum view, the plot
    tabs and the membership-function views on the left; the control, reset
    and configuration frames on the right.
    '''
    def __init__(self, app, *cnf):
        '''
        Builds the window, creates the pendulum model/controller and wires
        the widget events.
        :Parameters:
          app
            The QApplication instance; used to process events while the
            simulation loop is running and to exit on close.
        '''
        # Pendulum data (MKS units)
        l = 0.5
        m = 0.1
        mc = 0.5
        dt = 0.01
        self.ip = ip.InvertedPendulum(l, m, mc, dt)
        # Module-level fuzzy controller shared by the whole demo.
        self.pc = ip.PendulumController
        self.running = False
        # Domains used to draw the membership functions of theta and omega.
        self.Orange = linspace(-3.*pi/8., 3.*pi/8., 100)
        self.wrange = linspace(-9.*pi/2., 9.*pi/2., 100)
        self.F = 0.
        # Histories of each state variable, one sample per simulation step.
        self.Otrack = [ ]
        self.wtrack = [ ]
        self.xtrack = [ ]
        self.vtrack = [ ]
        self.Ftrack = [ ]
        # Frame Inicialization
        QFrame.__init__(self, *cnf)
        self.app = app
        self.setWindowTitle("Inverted Pendulum")
        # Graphic Elements
        self.ipview = PendulumView(l, m)
        self.graph = PlotWindow(5)
        self.ctrl_frame = ControlFrame(self)
        self.redef_frame = RedefFrame(self)
        self.config_frame = ConfigFrame(self)
        self.rule_label = QLabel("Show Rule:")
        self.rule_combo = QComboBox(self)
        # One entry per rule of the controller's decision table; the index
        # is passed straight to self.pc.eval() in feedback().
        # NOTE(review): the last label of the third row reads 'GN & GP -> PP';
        # given the row pattern it was probably meant to be 'PN & GP -> PP'.
        # Display-only text, left untouched here.
        self.rule_combo.addItems([
            'MGN & GN -> MMGN', 'MGN & PN -> MMGN', 'MGN & Z -> MGN', 'MGN & PP -> GN', 'MGN & GP -> PN',
            'GN & GN -> MMGN', 'GN & PN -> MGN', 'GN & Z -> GN', 'GN & PP -> PN', 'GN & GP -> Z',
            'PN & GN -> MGN', 'PN & PN -> GN', 'PN & Z -> PN', 'PN & PP -> Z', 'GN & GP -> PP',
            'Z & GN -> GN', 'Z & PN -> PN', 'Z & Z -> Z', 'Z & PP -> PP', 'Z & GP -> GP',
            'PP & GN -> PN', 'PP & PN -> Z', 'PP & Z -> PP', 'PP & PP -> GP', 'PP & GP -> MGP',
            'GP & GN -> Z', 'GP & PN -> PP', 'GP & Z -> GP', 'GP & PP -> MGP', 'GP & GP -> MMGP',
            'MGP & GN -> PP', 'MGP & PN -> GP', 'MGP & Z -> MGP', 'MGP & PP -> MMGP', 'MGP & GP -> MMGP'
        ])
        self.rule_combo.setCurrentIndex(17)    # Z & Z -> Z
        self.rule_combo.setEnabled(False)
        # Plots
        self.gframe = QFrame(self)
        # Theta memberships: 7 curves plus one (the last) for the cursor.
        self.Ograph = PlotWindow(8, self.gframe)
        self.Ograph.setAxisScale(Qwt.QwtPlot.xBottom, -3*pi/8, 3*pi/8)
        self.Ograph.setAxisScale(Qwt.QwtPlot.yLeft, -0.1, 1.1)
        self.Ograph.setCurveColor(-1, Qt.black)
        for i in range(7):
            self.Ograph.setCurveStyle(i, Qt.DotLine)
        # Omega memberships: 5 curves plus one for the cursor.
        self.wgraph = PlotWindow(6, self.gframe)
        self.wgraph.setAxisScale(Qwt.QwtPlot.xBottom, -9*pi/2., 9*pi/2.)
        self.wgraph.setAxisScale(Qwt.QwtPlot.yLeft, -0.1, 1.1)
        self.wgraph.setCurveColor(-1, Qt.black)
        for i in range(5):
            self.wgraph.setCurveStyle(i, Qt.DotLine)
        # Force memberships plus aggregated/selected results and a cursor.
        self.Fgraph = PlotWindow(12, self.gframe)
        self.Fgraph.setAxisScale(Qwt.QwtPlot.xBottom, -100., 100.)
        self.Fgraph.setAxisScale(Qwt.QwtPlot.yLeft, -0.1, 1.1)
        self.Fgraph.setCurveColor(0, Qt.darkGray)
        self.Fgraph.setCurveBaseline(0, 0.)
        self.Fgraph.setCurveBrush(0, QBrush(Qt.gray, Qt.SolidPattern))
        self.Fgraph.setCurveColor(1, Qt.black)
        self.Fgraph.setCurveBaseline(1, 0.)
        self.Fgraph.setCurveBrush(1, QBrush(Qt.darkGray, Qt.SolidPattern))
        self.Fgraph.setCurveColor(2, Qt.red)
        glayout = QGridLayout(self.gframe)
        glayout.addWidget(self.Ograph, 0, 0)
        glayout.addWidget(self.wgraph, 1, 0)
        glayout.addWidget(self.Fgraph, 0, 1, 2, 1)
        glayout.setRowStretch(0, 1)
        glayout.setRowStretch(1, 1)
        glayout.setColumnStretch(0, 1)
        glayout.setColumnStretch(1, 2)
        self.__drawO()
        self.__draww()
        self.__drawF()
        self.gframe.setLayout(glayout)
        # Tabs
        self.tabs = QTabWidget()
        self.tabs.addTab(self.ipview, 'Pendulum')
        self.tabs.addTab(self.graph, 'Graphics')
        self.tabs.addTab(self.gframe, 'Membership')
        layout = QGridLayout(self)
        layout.addWidget(self.tabs, 0, 0, 5, 1)
        layout.addWidget(self.ctrl_frame, 0, 1)
        layout.addWidget(self.redef_frame, 1, 1)
        layout.addWidget(self.config_frame, 2, 1)
        layout.addWidget(self.rule_label, 3, 1)
        layout.addWidget(self.rule_combo, 4, 1)
        layout.setRowStretch(0, 0)
        layout.setRowStretch(1, 0)
        layout.setRowStretch(2, 0)
        layout.setRowStretch(3, 0)
        layout.setRowStretch(4, 1)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 0)
        self.feedback(O=0., w=0., x=0., v=0., F=0.)
        # Connects the events
        self.connect(self.ctrl_frame.go_button, SIGNAL("clicked()"), self.on_go_button)
        self.connect(self.ctrl_frame.stop_button, SIGNAL("clicked()"), self.on_stop_button)
        self.connect(self.ctrl_frame.step_button, SIGNAL("clicked()"), self.on_step_button)
        self.connect(self.redef_frame.redef_button, SIGNAL("clicked()"), self.on_redef_button)
        self.connect(self.config_frame.logic_combo, SIGNAL("currentIndexChanged(int)"), self.on_logic_combo)
        self.connect(self.config_frame.infer_combo, SIGNAL("currentIndexChanged(int)"), self.on_infer_combo)
        self.connect(self.config_frame.defuzzy_combo, SIGNAL("currentIndexChanged(int)"), self.on_defuzzy_combo)
        self.connect(self.tabs, SIGNAL("currentChanged(int)"), self.on_change_tab)
        self.connect(self.rule_combo, SIGNAL("currentIndexChanged(int)"), self.on_rule_combo)
        # Shows the frame
        self.set_state(pi/8., 0., 0., 0., 0.)
        self.show()
    def enable(self):
        '''Re-enables the control, reset and configuration frames.'''
        self.ctrl_frame.enable()
        self.redef_frame.enable()
        self.config_frame.enable()
    def disable(self):
        '''Disables the control, reset and configuration frames.'''
        self.ctrl_frame.disable()
        self.redef_frame.disable()
        self.config_frame.disable()
    def __drawO(self):
        '''Draws the membership functions of theta; the last curve is the
        cursor placeholder updated by feedback().'''
        x = self.Orange
        self.Ograph.setMultiData( [
            (x, ip.Ovbn(x)), (x, ip.Obn(x)), (x, ip.Osn(x)), (x, ip.Oz(x)),
            (x, ip.Osp(x)), (x, ip.Obp(x)), (x, ip.Ovbp(x)), ([ 0. ], [ 0. ])
        ] )
    def __draww(self):
        '''Draws the membership functions of omega, plus the cursor curve.'''
        x = self.wrange
        self.wgraph.setMultiData( [
            (x, ip.wbn(x)), (x, ip.wsn(x)), (x, ip.wz(x)),
            (x, ip.wsp(x)), (x, ip.wbp(x)), ([ 0. ], [ 0. ])
        ] )
    def __drawF(self):
        '''Draws the membership functions of the force F; the three leading
        placeholder curves are the aggregated result, the selected-rule
        result and the defuzzified-value cursor.'''
        x = ip.F
        self.Fgraph.setMultiData( [
            ( [ 0. ], [ 0. ] ), ( [ 0. ], [ 0. ] ), ( [ 0., 0. ], [ -0.025, -0.1 ] ),
            (x, ip.Fvvbn(x)), (x, ip.Fvbn(x)), (x, ip.Fbn(x)), (x, ip.Fsn(x)),
            (x, ip.Fz(x)), (x, ip.Fsp(x)), (x, ip.Fbp(x)), (x, ip.Fvbp(x)),
            (x, ip.Fvvbp(x))
        ] )
    def set_state(self, O, w, x, v, F):
        '''Resets the histories and the pendulum model to the given state.'''
        self.Otrack = [ O ]
        self.wtrack = [ w ]
        self.xtrack = [ x ]
        self.vtrack = [ v ]
        self.Ftrack = [ F ]
        self.ip.set_state(O, w, x, v)
        self.feedback(O, w, x, v, F)
    def feedback(self, O, w, x, v, F):
        '''
        Updates the visualization of the given state. Only the currently
        visible tab is redrawn, to keep the simulation loop responsive.
        '''
        ci = self.tabs.currentIndex()
        if ci == 0:        # Pendulum
            self.ipview.set_state(O, w, x, v, F)
        elif ci == 1:      # Plots
            t = arange(0., 2.5, self.ip.dt)
            self.graph.setMultiData( [
                (t, self.Otrack), (t, self.wtrack),
                (t, self.xtrack), (t, self.vtrack),
                (t, zeros(t.shape))  #self.Ftrack)
            ])
        elif ci == 2:      # Membership
            # Vertical cursors marking the crisp inputs on each graph.
            self.Ograph.setData(-1, [ O, O ], [ 0., 1. ])
            self.wgraph.setData(-1, [ w, w ], [ 0., 1. ])
            self.Fgraph.setData(2, [ F, F ], [ -0.025, -0.1 ])
            # Aggregated result of all rules and result of the selected rule.
            rF = self.pc.eval_all(O, w)
            rule = self.rule_combo.currentIndex()
            _, sF = self.pc.eval(rule, (O, w))
            if sF is None:
                sF = zeros(ip.F.shape)
            self.Fgraph.setData(0, ip.F, rF)
            self.Fgraph.setData(1, ip.F, sF)
        self.redef_frame.feedback(O, w, x, v, F)
    def step(self):
        '''
        Executes one simulation step: asks the controller for the force,
        applies it to the pendulum and logs the (pre-update) state.
        '''
        O, w, x, v = self.ip.get_state()
        F = self.pc(O, w)
        self.ip.apply(F)
        self.feedback(O, w, x, v, F)
        self.Otrack.append(O)
        self.wtrack.append(w)
        self.xtrack.append(x)
        self.vtrack.append(v)
        self.Ftrack.append(F)
    def on_go_button(self):
        '''Runs the simulation loop until on_stop_button clears the flag;
        Qt events are pumped manually so the GUI stays responsive.'''
        self.disable()
        self.running = True
        while self.running:
            self.step()
            self.app.processEvents()
        self.enable()
    def on_stop_button(self):
        '''Flags the running loop to stop after the current step.'''
        self.running = False
    def on_step_button(self):
        '''Executes a single simulation step (ignored while running).'''
        if self.running:
            return
        self.step()
    def on_redef_button(self):
        '''Resets the pendulum to the state typed in the reset frame.'''
        if self.running:
            return
        O, w, x, v = self.redef_frame.get_values()
        self.Otrack = [ ]
        self.wtrack = [ ]
        self.xtrack = [ ]
        self.vtrack = [ ]
        self.Ftrack = [ ]
        self.set_state(O, w, x, v, 0)
    def on_logic_combo(self, index):
        '''Switches the controller's norm/conorm/negation family.'''
        if index == 0:      # Zadeh norms
            self.pc.set_norm(ZadehAnd)
            self.pc.set_conorm(ZadehOr)
            self.pc.set_negation(ZadehNot)
        elif index == 1:    # Probabilistic norms
            self.pc.set_norm(ProbabilisticAnd)
            self.pc.set_conorm(ProbabilisticOr)
            self.pc.set_negation(ProbabilisticNot)
        elif index == 2:    # Einstein norms
            self.pc.set_norm(EinsteinProduct)
            self.pc.set_conorm(EinsteinSum)
            self.pc.set_negation(ZadehNot)
        elif index == 3:    # Drastic norms
            self.pc.set_norm(DrasticProduct)
            self.pc.set_conorm(DrasticSum)
            self.pc.set_negation(ZadehNot)
    def on_infer_combo(self, index):
        '''Switches the controller's implication/aglutination pair.'''
        if index == 0:      # Mamdani rules
            self.pc.set_implication(MamdaniImplication)
            self.pc.set_aglutination(MamdaniAglutination)
        elif index == 1:    # Probabilistic rules
            self.pc.set_implication(ProbabilisticImplication)
            self.pc.set_aglutination(ProbabilisticAglutination)
        elif index == 2:    # Zadeh implication and Mamdani aglutination
            self.pc.set_implication(ZadehImplication)
            self.pc.set_aglutination(MamdaniAglutination)
        elif index == 3:    # Dienes-Rescher implication and Mamdani aglutination
            self.pc.set_implication(DienesRescherImplication)
            self.pc.set_aglutination(MamdaniAglutination)
        elif index == 4:    # Lukasiewicz implication and Mamdani aglutination
            self.pc.set_implication(LukasiewiczImplication)
            self.pc.set_aglutination(MamdaniAglutination)
        elif index == 5:    # Godel implication and Mamdani aglutination
            self.pc.set_implication(GodelImplication)
            self.pc.set_aglutination(MamdaniAglutination)
    def on_defuzzy_combo(self, index):
        '''Switches the controller's defuzzification method.'''
        if index == 0:      # Centroid:
            self.pc.defuzzify = Centroid
        elif index == 1:    # Bisection
            self.pc.defuzzify = Bisector
        elif index == 2:    # SOM
            self.pc.defuzzify = SmallestOfMaxima
        elif index == 3:    # LOM
            self.pc.defuzzify = LargestOfMaxima
        elif index == 4:    # MOM
            self.pc.defuzzify = MeanOfMaxima
    def on_change_tab(self, index):
        '''Redraws the newly selected tab with the latest logged state; the
        rule combo is only meaningful on the Membership tab.'''
        if index == 0:      # Pendulum
            O = self.Otrack[-1]
            w = self.wtrack[-1]
            x = self.xtrack[-1]
            v = self.vtrack[-1]
            F = self.Ftrack[-1]
            self.ipview.set_state(O, w, x, v, F)
            self.rule_combo.setEnabled(False)
        elif index == 1:    # Plots
            t = arange(0., 2.5, self.ip.dt)
            self.graph.setMultiData( [
                (t, self.Otrack), (t, self.wtrack),
                (t, self.xtrack), (t, self.vtrack),
                (t, zeros(t.shape))  #self.Ftrack)
            ])
            self.rule_combo.setEnabled(False)
        elif index == 2:    # Membership
            O = self.Otrack[-1]
            w = self.wtrack[-1]
            x = self.xtrack[-1]
            v = self.vtrack[-1]
            F = self.Ftrack[-1]
            self.feedback(O, w, x, v, F)
            self.rule_combo.setEnabled(True)
    def on_rule_combo(self, index):
        '''Redraws the membership tab for the newly selected rule.'''
        O = self.Otrack[-1]
        w = self.wtrack[-1]
        x = self.xtrack[-1]
        v = self.vtrack[-1]
        F = self.Ftrack[-1]
        self.feedback(O, w, x, v, F)
    def closeEvent(self, event):
        '''Stops the simulation loop and quits the application.'''
        self.on_stop_button()
        self.app.exit(0)
################################################################################
# Main Program
################################################################################
if __name__ == "__main__":
    # Create the Qt application and hand it to the main window.
    application = QApplication([])
    window = IPFrame(application, None)
    application.exec_()
| Python |
# -*- coding: utf-8 -*-
################################################################################
# Mathematical model of an inverted pendulum
# Jose Alexandre Nalon
#
# Date: 06-12-2007
# This file implements the dynamical model of the pendulum.
################################################################################
################################################################################
# Used modules
################################################################################
from numpy import *
from peach import *
################################################################################
# Classes
################################################################################
class InvertedPendulum(object):
    '''
    Dynamic model of an inverted pendulum. It calculates the linear and angular
    accelerations (represented by a and q, respectively). Speed and position
    are calculated through Euler discretization of the differential equations.
    '''
    def __init__(self, l = 0.5, m = 0.1, mc = 0.5, dt = 0.01):
        '''
        Initializes the pendulum.
        :Parameters:
          l
            Pendulum length (in meters)
          m
            Pendulum mass (in kilograms)
          mc
            Cart mass (in kilograms)
          dt
            Time delta for simulation (in seconds)
        '''
        self.l = l
        self.m = m
        self.mc = mc
        self.dt = dt
        self.O = 0.           # Pendulum angular position in rad
        self.w = 0.           # Pendulum angular velocity in rad/s
        self.x = 0.           # Cart position in meters
        self.v = 0.           # Cart speed in meters/second
    def __pv(self, x):
        '''
        Principal value of angle x (that is, -pi <= x < pi)
        '''
        return (x + pi)%(2*pi) - pi
    def set_state(self, O = 0., w = 0., x = 0., v = 0.):
        '''
        Sets the state of the pendulum.
        :Parameters:
          O
            Angular position in radians (theta)
          w
            Angular velocity in radians/second (omega)
          x
            Position of the cart in meters
          v
            Speed of the cart in meters/second
        '''
        self.O = self.__pv(O)
        self.w = w
        self.x = x
        self.v = v
    def get_state(self):
        '''
        Get the state of the pendulum, in the form of a tuple.
        :Returns:
          A tuple containing, in order, the angular position in radians, the
          angular velocity in radians/seconds, the cart position in meters, the
          cart speed in meters/second.
        '''
        return (self.O, self.w, self.x, self.v)
    def apply(self, F):
        '''
        Given the present state of the cart, calculates the next values for the
        state variables, using one Euler step of the standard cart-pole
        equations of motion.
        :Parameters:
          F
            Force applied to the cart (in newtons)
        :Returns:
          The new (O, w, x, v) state tuple.
        '''
        g = 9.80665           # Gravity in m/s^2
        l = self.l
        O = self.O
        w = self.w
        x = self.x
        v = self.v
        so = sin(O)
        co = cos(O)
        m = self.m
        mc = self.mc
        dt = self.dt
        M = m + mc            # Total mass of cart plus pendulum
        # Angular acceleration, standard cart-pole model (cf. Barto, Sutton
        # & Anderson, 1983).
        q = (g*so + (-F - m*l*w*w*so)*co/M) / (l * (4./3. - m*co*co/M))
        # Linear acceleration of the cart. BUG FIX: the original expression
        # ``F - (m*l*(w*w*so - q*co)) / M`` added a force (N) to an
        # acceleration term (m/s^2) and used the wrong sign; the standard
        # model is a = (F + m*l*(w^2*sin(O) - q*cos(O))) / M.
        a = (F + m*l*(w*w*so - q*co)) / M
        # Euler integration of the state variables.
        self.w = w + q*dt
        self.O = self.__pv(O + self.w*dt)
        self.v = v + a*dt
        self.x = x + self.v*dt
        return self.get_state()
################################################################################
# Mamdani controller for the pendulum
################################################################################
Points = 500          # Number of points to represent the domain.
# Create the membership functions to variable O (inclination of the pendulum).
# FlatSaw presumably builds evenly spaced membership functions spanning the
# given interval -- TODO confirm against the peach documentation.
# Ovbn = Very Big Negative
# Obn = Big Negative
# Osn = Small Negative
# Oz = Near Zero
# Osp = Small Positive
# Obp = Big Positive
# Ovbp = Very Big Positive
Ovbn, Obn, Osn, Oz, Osp, Obp, Ovbp = FlatSaw((-3*pi/8, 3*pi/8), 7)
# Create the membership functions to variable w (angular speed).
# wbn = Big Negative
# wsn = Small Negative
# wz = Near Zero
# wsp = Small Positive
# wbp = Big Positive
wbn, wsn, wz, wsp, wbp = FlatSaw((-3*pi, 3*pi), 5)
# Create the membership functions to variable F (Force applied to chart).
# Fvvbn = Very Very Big Negative
# Fvbn = Very Big Negative
# Fbn = Big Negative
# Fsn = Small Negative
# Fz = Near Zero
# Fsp = Small Positive
# Fbp = Big Positive
# Fvbp = Very Big Positive
# Fvvbp = Very Very Big Positive
F = linspace(-100., 100., Points)
Fvvbn, Fvbn, Fbn, Fsn, Fz, Fsp, Fbp, Fvbp, Fvvbp = FlatSaw((-100., 100.), 9)
# Create the membership functions to variable x (cart position).
# Used only by the (disabled) position/speed rule table below.
xn, xz, xp = FlatSaw((-10., 10.), 3)
# Create the membership functions to variable v (cart speed).
vn, vz, vp = FlatSaw((-6., 6.), 3)
# Create the controller and insert into it the decision rules. The decision
# rules are inserted with the use of the add_table method. In this table, each
# line represents a linguistic value of the O variable; each column represents
# a linguistic value of the variable w. Each element of the table is the given
# answer linguistic value of the variable F.
PendulumController = Controller(F, rules=[], defuzzy=Centroid)
PendulumController.add_table(
    [ Ovbn, Obn, Osn, Oz, Osp, Obp, Ovbp ],
    [ wbn, wsn, wz, wsp, wbp ],
    [ [ Fvvbn, Fvvbn, Fvbn, Fbn, Fsn ],
      [ Fvvbn, Fvbn, Fbn, Fsn, Fz ],
      [ Fvbn, Fbn, Fsn, Fz, Fsp ],
      [ Fbn, Fsn, Fz, Fsp, Fbp ],
      [ Fsn, Fz, Fsp, Fbp, Fvbp ],
      [ Fz, Fsp, Fbp, Fvbp, Fvvbp ],
      [ Fsp, Fbp, Fvbp, Fvvbp, Fvvbp ] ] )
# Decision rules for position and speed of the cart. While this worked, the
# pendulum ended up very unstable.
#PendulumController.add_table(
#    [ xn, xz, xp ], [ vn, vz, vp ],
#    [ [ Fpp, Fpp, Fz ],
#      [ Fpp, Fz, Fpn ],
#      [ Fz, Fpn, Fpn ] ] )
| Python |
# -*- coding: utf-8 -*-
################################################################################
# Widget to draw the inverted pendulum.
# Jose Alexandre Nalon
#
# Date: 06-12-2007
################################################################################
################################################################################
# Used modules
################################################################################
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from numpy import *
################################################################################
# Classes
################################################################################
class ArrowItem(QGraphicsItem):
    '''
    Composite graphics item drawing an arrow: a line as the body and a
    small triangular polygon as the head, both added to the given scene.
    '''
    def __init__(self, gs, pen, brush, *cnf):
        QGraphicsItem.__init__(self, *cnf)
        # Both parts are created empty and positioned by set_coordinates.
        self.body = gs.addLine(QLineF(), pen)
        self.a = gs.addPolygon(QPolygonF(), pen, brush)
    def setZValue(self, zvalue):
        '''Applies the same stacking order to both parts of the arrow.'''
        for part in (self.body, self.a):
            part.setZValue(zvalue)
    def show(self):
        '''Shows both parts of the arrow.'''
        for part in (self.body, self.a):
            part.show()
    def hide(self):
        '''Hides both parts of the arrow.'''
        for part in (self.body, self.a):
            part.hide()
    def set_coordinates(self, v_xo, v_yo, v_xi, v_yi):
        '''Points the arrow from (v_xo, v_yo) to (v_xi, v_yi).'''
        # Direction of the arrow axis; the head sides are spread pi/8 to
        # either side of it, 6 pixels back from the tip.
        angle = arctan2(v_yi - v_yo, v_xi - v_xo)
        tip = QPointF(v_xi, v_yi)
        left = QPointF(v_xi - 6*cos(angle + pi/8.), v_yi - 6*sin(angle + pi/8.))
        right = QPointF(v_xi - 6*cos(angle - pi/8.), v_yi - 6*sin(angle - pi/8.))
        self.body.setLine(v_xo, v_yo, v_xi, v_yi)
        self.a.setPolygon(QPolygonF([ tip, left, right ]))
        self.body.show()
        self.a.show()
################################################################################
class PendulumView(QGraphicsView):
"""
Visualization of the pendulum.
"""
    def __init__(self, l = 0.5, m = 0.1, *cnf):
        '''
        Initializes the view.
        :Parameters:
          l
            Pendulum length (in meters), used to size the pole.
          m
            Pendulum mass (in kilograms), used to size the bob.
        '''
        QGraphicsView.__init__(self, *cnf)
        # Pixel size of the widget, captured once at construction time.
        self.__xsize = self.width()
        self.__ysize = self.height()
        # Drawing dimensions derived from the physical parameters.
        self.pend_radius = 0.5 * m
        self.pole_length = l
        self.__set_scale()
        self.__create_gs()
        self.__create_objects()
        self.set_state(0., 0., 0., 0., 0.)
        self.show()
def __set_scale(self):
xmin = -0.1
ymin = -1.1
xmax = 1.1
ymax = 1.1
self.__ay = - float(self.__ysize) / (ymax - ymin)
self.__by = - self.__ay * ymax
self.__ax = - self.__ay # Same scale for x-axis
self.__bx = self.__xsize / 2.
def __create_gs(self):
self.gs = QGraphicsScene(0, 0, self.__xsize, self.__ysize)
self.gs.setBackgroundBrush(QBrush(QColor(255, 255, 255)))
self.setScene(self.gs)
def __create_objects(self):
# Draws the floor
ip_pen = QPen(QColor(0, 0, 0), 2)
ip_brush = QBrush(QColor(255, 255, 255))
_, fy = self.__transform(0., -0.04)
fw = 0.025 * self.__ay
floor = self.gs.addRect(QRectF(5, fy, self.__xsize-10, fw), ip_pen, ip_brush)
floor.setZValue(200)
floor.show()
# Dimensions of the pole in meters and radians
self.pole = self.gs.addLine(QLineF(), ip_pen)
self.pole.setZValue(300)
self.pole.show()
# Dimensions of the weight in meters and kilograms
self.pend = self.gs.addEllipse(QRectF(), ip_pen, ip_brush)
self.pend.setZValue(500)
self.pend.show()
# Feedback of the angle
ref_pen = QPen(QColor(0, 128, 0))
ref_pen.setStyle(Qt.DashLine)
self.reference = self.gs.addLine(QLineF(), ref_pen)
self.reference.setZValue(100)
self.reference.show()
self.angle_text = self.gs.addText('')
self.angle_text.setDefaultTextColor(QColor(0, 128, 0))
self.angle_text.setZValue(105)
self.angle_text.show()
# Feedback of angular velocity
av_pen = QPen(QColor(0, 0, 128))
av_pen.setWidth(2)
av_brush = QBrush(QColor(0, 0, 128))
self.angle_velocity = ArrowItem(self.gs, av_pen, av_brush)
self.angle_velocity.setZValue(400)
self.angle_velocity.show()
self.av_text = self.gs.addText('')
self.av_text.setDefaultTextColor(QColor(0, 0, 128))
self.av_text.setZValue(106)
self.av_text.show()
# Dimensions of the cart in meters
self.cart_width = 0.3
self.cart_height = 0.1
self.cart = self.gs.addRect(QRectF(), ip_pen, ip_brush)
self.cart.setZValue(502)
self.cart.show()
# Feedback of force vector (in newtons)
vector_pen = QPen(QColor(192, 0, 0))
vector_pen.setWidth(2)
vector_brush = QBrush(QColor(192, 0, 0))
self.vector_length = 0.1
self.force = ArrowItem(self.gs, vector_pen, vector_brush)
self.force.setZValue(100)
self.force.show()
# Force vector text (in newtons)
self.force_text = self.gs.addText('')
self.force_text.setDefaultTextColor(QColor(192, 0, 0))
self.force_text.setZValue(104)
self.force_text.show()
def set_state(self, O, w, x, v, F):
self.__state = (O, w, x, v, F)
# Updates the pole
pole_xo = x
pole_yo = self.cart_height
pole_xi = pole_xo + self.pole_length*sin(O)
pole_yi = pole_yo + self.pole_length*cos(O)
pole_xo, pole_yo = self.__transform(pole_xo, pole_yo)
pole_xi, pole_yi = self.__transform(pole_xi, pole_yi)
self.pole.setLine(pole_xo, pole_yo, pole_xi, pole_yi)
# Updates the weight
pend_radius = self.pend_radius * self.__ax
self.pend.setRect(pole_xi-pend_radius, pole_yi-pend_radius, pend_radius*2, pend_radius*2)
# Updates the angle reference
_, ref_yi = self.__transform(0, self.pole_length)
self.reference.setLine(pole_xo, pole_yo, pole_xo, pole_yo-ref_yi)
self.angle_text.setHtml('O = %7.2f'%(O*180./pi))
self.angle_text.setPos(pole_xo-37, pole_yo-ref_yi-20)
self.angle_text.show()
# Updates the cart
cart_x = x - self.cart_width/2.
cart_y = 0.
cart_x, cart_y = self.__transform(cart_x, cart_y)
cart_width = self.cart_width * self.__ax
cart_height = self.cart_height * self.__ay
self.cart.setRect(cart_x, cart_y, cart_width, cart_height)
# Updates the angular velocity
if -0.1 < w < 0.1:
av_l = sign(w)*0.01
else:
av_l = 0.1*w
if w > 0.01:
av_xi = pole_xi + pend_radius*cos(O)
av_yi = pole_yi + pend_radius*sin(O)
av_xo = av_xi + self.__ax * av_l * cos(O)
av_yo = av_yi - self.__ay * av_l * sin(O)
self.angle_velocity.set_coordinates(av_xi, av_yi, av_xo, av_yo)
self.av_text.setHtml('w = %7.4f' % w)
self.av_text.setPos(av_xo, av_yo-8)
self.av_text.show()
elif w < -0.01:
av_xi = pole_xi - pend_radius*cos(O)
av_yi = pole_yi - pend_radius*sin(O)
av_xo = av_xi + self.__ax * av_l * cos(O)
av_yo = av_yi - self.__ay * av_l * sin(O)
self.angle_velocity.set_coordinates(av_xi, av_yi, av_xo, av_yo)
self.av_text.setHtml('w = %7.4f' % w)
self.av_text.setPos(av_xo-70, av_yo-8)
self.av_text.show()
else:
self.angle_velocity.hide()
self.av_text.hide()
# Updates the force vector
if -0.2 < F < 0.2:
vector_l = sign(F)*0.02
else:
vector_l = 0.1*F
if vector_l > 0.1:
vector_x = x - self.cart_width/2. - vector_l
v_x, v_y = self.__transform(vector_x, self.cart_height/2.)
vector_l = vector_l * self.__ax
self.force.set_coordinates(v_x, v_y, v_x + vector_l-2, v_y)
self.force_text.setHtml('F = %7.4f' % F)
self.force_text.setPos(v_x+vector_l-80, v_y - 20)
self.force_text.show()
elif vector_l < -0.1:
vector_x = x + self.cart_width/2. - vector_l
v_x, v_y = self.__transform(vector_x, self.cart_height/2.)
vector_l = vector_l * self.__ax
self.force.set_coordinates(v_x, v_y, v_x + vector_l+2, v_y)
self.force_text.setHtml('F=%7.4f' % F)
self.force_text.setPos(v_x+vector_l+5, v_y - 20)
self.force_text.show()
else:
self.force.hide()
self.force_text.hide()
# Updates the scene
self.gs.update()
def __transform(self, x, y):
'''
Transforms a pair of real world coordinates to screen coordinates.
'''
xr = int(self.__ax * x + self.__bx)
yr = int(self.__ay * y + self.__by)
return (xr, yr)
def resizeEvent(self, event):
self.__xsize = event.size().width()
self.__ysize = event.size().height()
self.__set_scale()
self.__create_gs()
self.__create_objects()
self.set_state(*self.__state)
################################################################################
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Installation script for the Peach package.
# Prefers setuptools (which honours ``install_requires``); falls back to
# distutils when setuptools is not available.
try:
    from setuptools import setup
except ImportError:
    # Fixed: the bound exception object was never used. Note that the
    # distutils ``setup`` silently ignores the ``install_requires`` keyword.
    from distutils.core import setup
long_description = '''
Peach is a pure-python module, based on SciPy and NumPy to implement
algorithms for computational intelligence and machine learning. Methods
implemented include, but are not limited to, artificial neural networks,
fuzzy logic, genetic algorithms, swarm intelligence and much more.
The aim of this library is primarily educational. Nonetheless, care was
taken to make the methods implemented also very efficient.
'''
setup(
    name='Peach',
    version='0.3.1',
    url='http://code.google.com/p/peach/',
    download_url='http://code.google.com/p/peach/downloads/list',
    license='GNU Lesser General Public License',
    author='Jose Alexandre Nalon',
    author_email='jnalon@gmail.com',
    description='Python library for computational intelligence and machine learning',
    long_description=long_description,
    classifiers=[
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python',
        'Topic :: Education',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    keywords='artificial intelligence neural network genetic algorithm fuzzy logic optimization artificial life',
    packages=[
        'peach',
        'peach.fuzzy',
        'peach.ga',
        'peach.nn',
        'peach.optm',
        'peach.pso',
        'peach.sa'
    ],
    install_requires=[
        'bitarray',
    ],
)
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/nn.py
# Basic topologies of neural networks
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic topologies of neural networks.
This sub-package implements various neural network topologies, see the complete
list below. These topologies are implemented using the ``Layer`` class of the
``base`` sub-package. Please, consult the documentation of that module for more
information on layers of neurons. The neural nets implemented here don't derive
from the ``Layer`` class, instead, they have instance variables to take control
of them. Thus, there is no base class for networks. While subclassing the
classes of this module is usually safe, it is recomended that a new kind of
net is developed from the ground up.
"""
################################################################################
import random

from numpy import abs, argmin, array, dot, exp, reshape, sqrt, sum, zeros

from base import *
from af import *
from lrules import *
################################################################################
class FeedForward(list):
    '''
    Classic completely connected neural network.

    A feedforward neural network is implemented as a list of layers, each
    layer being a ``Layer`` object (please consult the documentation on the
    ``base`` module for more information on layers). The layers are
    completely connected, which means that every neuron in one layer is
    connected to every other neuron in the following layer.

    There is a number of learning methods that are already implemented, but
    in general, any learning class derived from ``FFLearning`` can be used.
    No other kind of learning can be used. Please, consult the documentation
    on the ``lrules`` (*learning rules*) module.
    '''
    def __init__(self, layers, phi=Linear, lrule=BackPropagation, bias=False):
        '''
        Initializes a feedforward neural network.

        A feedforward network is implemented as a list of layers, completely
        connected.

        :Parameters:
          layers
            A list of integers containing the shape of the network. The
            first element of the list is the number of inputs of the network
            (or, as somebody prefer, the number of input neurons); the
            number of outputs is the number of neurons in the last layer.
            Thus, at least two numbers should be given.
          phi
            The activation functions to be used with each layer of the
            network. Please consult the ``Layer`` documentation in the
            ``base`` module for more information. This parameter can be a
            single function or a list of functions. If only one function is
            given, then the same function is used in every layer. If a list
            of functions is given, then the layers use the functions in the
            sequence given. Note that heterogeneous networks can be created
            that way. Defaults to ``Linear``.
          lrule
            The learning rule used. Only ``FFLearning`` objects (instances
            of the class or of the subclasses) are allowed. Defaults to
            ``BackPropagation``. Check the ``lrules`` documentation for more
            information.
          bias
            If ``True``, then the neurons are biased.

        :Raises:
          ValueError, if ``lrule`` is neither a ``FFLearning`` instance nor
          a ``FFLearning`` subclass.
        '''
        list.__init__(self, [ ])
        layers = list(layers)
        # Each consecutive pair (n, m) becomes a layer of m neurons with n
        # inputs each, so the network is completely connected.
        for n, m in zip(layers[:-1], layers[1:]):
            self.append(Layer((m, n), bias=bias))
        self.phi = phi
        self.__n = len(self)
        # The learning rule may be given as an instance or as a class; a
        # class is instantiated with default parameters. Anything else is
        # rejected. (The previous code discarded the ``issubclass`` result,
        # silently accepting and instantiating unrelated classes.)
        if isinstance(lrule, FFLearning):
            self.__lrule = lrule
        else:
            try:
                is_ff = issubclass(lrule, FFLearning)
            except TypeError:
                is_ff = False
            if not is_ff:
                raise ValueError('incompatible learning rule')
            self.__lrule = lrule()
    def __getnlayers(self):
        return self.__n
    nlayers = property(__getnlayers, None)
    '''Number of layers of the neural network. Not writable.'''
    def __getbias(self):
        r = [ ]
        for l in self:
            r.append(l.bias)
        return tuple(r)
    bias = property(__getbias, None)
    '''A tuple containing the bias of each layer. Not writable.'''
    def __gety(self):
        return self[-1].y
    y = property(__gety, None)
    '''A list of activation values for each neuron in the last layer of the
    network, ie., the answer of the network. This property is available only
    after the network is fed some input.'''
    def __getphi(self):
        r = [ ]
        for l in self:
            r.append(l.phi)
        return tuple(r)
    def __setphi(self, phis):
        # A sequence assigns one function per layer; a single (non-iterable)
        # function is replicated over every layer.
        try:
            phis = tuple(phis)
            for w, v in zip(self, phis):
                w.phi = v
        except TypeError:
            for w in self:
                w.phi = phis
    phi = property(__getphi, __setphi)
    '''Activation functions for every layer in the network. It is a list of
    ``Activation`` objects, but can be set with only one function. In this
    case, the same function is used for every layer.'''
    def __call__(self, x):
        '''
        The feedforward method of the network.

        The ``__call__`` interface should be called if the answer of the
        neural network to a given input vector ``x`` is desired. *This
        method has collateral effects*, so beware. After the calling of this
        method, the ``y`` property is set with the activation potential and
        the answer of the neurons, respectively.

        :Parameters:
          x
            The input vector to the network.

        :Returns:
          The vector containing the answer of every neuron in the last
          layer, in the respective order.
        '''
        # Each layer's output is the next layer's input.
        for w in self:
            x = w(x)
        return self[-1].y
    def learn(self, x, d):
        '''
        Applies one example of the training set to the network.

        Using this method, one iteration of the learning procedure is made
        with the neurons of this network. This method presents one example
        (not necessarily of a training set) and applies the learning rule
        over the network. The learning rule is defined in the initialization
        of the network, and some are implemented on the ``lrules`` module.
        New methods can be created, consult the ``lrules`` documentation
        but, for ``FeedForward`` instances, only ``FFLearning`` learning is
        allowed.

        Also, notice that *this method only applies the learning method!*
        The network should be fed with the same input vector before trying
        to learn anything first. Consult the ``feed`` and ``train`` methods
        below for more ways to train a network.

        :Parameters:
          x
            Input vector of the example. It should be a column vector of the
            correct dimension, that is, the number of input neurons.
          d
            The desired answer of the network for this particular input
            vector. Notice that the desired answer should have the same
            dimension of the last layer of the network. This means that a
            desired answer should be given for every output of the network.

        :Returns:
          The error obtained by the network.
        '''
        self.__lrule(self, x, d)
        # Error is measured against the *last* feed of the network.
        return sum(abs(d - self.y))
    def feed(self, x, d):
        '''
        Feeds the network and applies one example of the training set to it.

        Using this method, one iteration of the learning procedure is made
        with the neurons of this network. This method presents one example
        (not necessarily of a training set) and applies the learning rule
        over the network. The learning rule is defined in the initialization
        of the network, and some are implemented on the ``lrules`` module.
        New methods can be created, consult the ``lrules`` documentation
        but, for ``FeedForward`` instances, only ``FFLearning`` learning is
        allowed.

        Also, notice that *this method feeds the network* before applying
        the learning rule. Feeding the network has collateral effects, and
        some properties change when this happens. Namely, the ``y`` property
        is set. Please consult the ``__call__`` interface.

        :Parameters:
          x
            Input vector of the example. It should be a column vector of the
            correct dimension, that is, the number of input neurons.
          d
            The desired answer of the network for this particular input
            vector. Notice that the desired answer should have the same
            dimension of the last layer of the network. This means that a
            desired answer should be given for every output of the network.

        :Returns:
          The error obtained by the network.
        '''
        self(x)
        return self.learn(x, d)
    def train(self, train_set, imax=2000, emax=1e-5, randomize=False):
        '''
        Presents a training set to the network.

        This method automates the training of the network. Given a training
        set, the examples are shown to the network (possibly in a randomized
        way). A maximum number of iterations or a maximum admitted error
        should be given as a stop condition.

        :Parameters:
          train_set
            The training set is a list of examples. It can have any size and
            can contain repeated examples. In fact, the definition of the
            training set is open. Each element of the training set, however,
            should be a two-tuple ``(x, d)``, where ``x`` is the input
            vector, and ``d`` is the desired response of the network for
            this particular input. See the ``learn`` and ``feed`` methods
            for more information.
          imax
            The maximum number of iterations. Examples from the training set
            will be presented to the network while this limit is not
            reached. Defaults to 2000.
          emax
            The maximum admitted error. Examples from the training set will
            be presented to the network until the error obtained is lower
            than this limit. Defaults to 1e-5.
          randomize
            If this is ``True``, then the examples are shown in a randomized
            order. If ``False``, then the examples are shown in the same
            order that they appear in the ``train_set`` list. Defaults to
            ``False``.

        :Returns:
          The error obtained for the last example presented.
        '''
        i = 0
        error = 1
        s = len(train_set)
        while i<imax and error>emax:
            if randomize:
                x, d = random.choice(train_set)
            else:
                # Cycle over the training set in order.
                x, d = train_set[i%s]
            error = self.feed(x, d)
            i = i+1
        return error
################################################################################
class SOM(Layer):
    '''
    A Self-Organizing Map (SOM).

    A self-organizing map is a type of neural network that is trained via
    unsupervised learning. In particular, the self-organizing map finds the
    neuron closest to an input vector -- this neuron is the winning neuron,
    and it is the answer of the network. Thus, the SOM is usually used for
    classification and pattern recognition.

    The SOM is a single-layer network, so this class subclasses the
    ``Layer`` class. But some of the properties of a ``Layer`` object are
    not available or make no sense in this context.
    '''
    def __init__(self, shape, lrule=Competitive):
        '''
        Initializes a self-organizing map.

        A self-organizing map is implemented as a layer of neurons. There is
        no connection among the neurons. The answer to a given input is the
        neuron closest to the given input. ``phi`` (the activation
        function), ``v`` (the activation potential) and ``bias`` are not
        used.

        :Parameters:
          shape
            Establishes the size of the SOM. It must be a two-tuple of the
            format ``(m, n)``, where ``m`` is the number of neurons in the
            layer, and ``n`` is the number of inputs of each neuron. The
            neurons in the layer all have the same number of inputs.
          lrule
            The learning rule used. Only ``SOMLearning`` objects (instances
            of the class or of the subclasses) are allowed. Defaults to
            ``Competitive``. Check the ``lrules`` documentation for more
            information.

        :Raises:
          ValueError, if ``lrule`` is neither a ``SOMLearning`` instance nor
          a ``SOMLearning`` subclass.
        '''
        Layer.__init__(self, shape, phi=None, bias=False)
        # ``None`` marks "not fed yet"; the ``y`` property raises until the
        # first call of the network.
        self.__y = None
        self.__phi = None
        # The learning rule may be given as an instance or as a class; a
        # class is instantiated with default parameters. Anything else is
        # rejected. (The previous code discarded the ``issubclass`` result,
        # silently accepting and instantiating unrelated classes.)
        if isinstance(lrule, SOMLearning):
            self.__lrule = lrule
        else:
            try:
                is_som = issubclass(lrule, SOMLearning)
            except TypeError:
                is_som = False
            if not is_som:
                raise ValueError('incompatible learning rule')
            self.__lrule = lrule()
    def __gety(self):
        if self.__y is None:
            raise ValueError("activation unavailable")
        return self.__y
    y = property(__gety, None)
    '''The winning neuron for a given input, the answer of the network. This
    property is available only after the network is fed some input.'''
    def __call__(self, x):
        '''
        The response of the network to a given input.

        The ``__call__`` interface should be called if the answer of the
        network to a given input vector ``x`` is desired. *This method has
        collateral effects*, so beware. After the calling of this method,
        the ``y`` property is set with the index of the winning neuron.

        :Parameters:
          x
            The input vector to the network.

        :Returns:
          The index of the winning neuron.
        '''
        x = reshape(x, (1, self.inputs))
        # Euclidean distance from the input to every neuron's weight
        # vector; the winner is the closest one.
        dist = sqrt(sum((x - self.weights)**2, axis=1))
        self.__y = argmin(dist)
        return self.y
    def learn(self, x):
        '''
        Applies one example of the training set to the network.

        Using this method, one iteration of the learning procedure is made
        with the neurons of this network. This method presents one example
        (not necessarily of a training set) and applies the learning rule
        over the network. The learning rule is defined in the initialization
        of the network, and some are implemented on the ``lrules`` module.
        New methods can be created, consult the ``lrules`` documentation
        but, for ``SOM`` instances, only ``SOMLearning`` learning is
        allowed.

        Also, notice that *this method only applies the learning method!*
        The network should be fed with the same input vector before trying
        to learn anything first. Consult the ``feed`` and ``train`` methods
        below for more ways to train a network.

        :Parameters:
          x
            Input vector of the example. It should be a column vector of the
            correct dimension, that is, the number of input neurons.

        :Returns:
          The error obtained by the network.
        '''
        self.__lrule(self, x)
        # NOTE(review): this "error" compares the input vector with the
        # *index* of the winning neuron (``self.y``), not with its weight
        # vector -- confirm this is the intended error measure before
        # changing it; callers may rely on the current values.
        return sum(abs(x - self.y))
    def feed(self, x):
        '''
        Feeds the network and applies one example of the training set to it.

        Using this method, one iteration of the learning procedure is made
        with the neurons of this network. This method presents one example
        (not necessarily of a training set) and applies the learning rule
        over the network. The learning rule is defined in the initialization
        of the network, and some are implemented on the ``lrules`` module.
        New methods can be created, consult the ``lrules`` documentation
        but, for ``SOM`` instances, only ``SOMLearning`` learning is
        allowed.

        Also, notice that *this method feeds the network* before applying
        the learning rule. Feeding the network has collateral effects, and
        some properties change when this happens. Namely, the ``y`` property
        is set. Please consult the ``__call__`` interface.

        :Parameters:
          x
            Input vector of the example. It should be a column vector of the
            correct dimension, that is, the number of input neurons.

        :Returns:
          The error obtained by the network.
        '''
        self(x)
        return self.learn(x)
    def train(self, train_set, imax=2000, emax=1e-5, randomize=False):
        '''
        Presents a training set to the network.

        This method automates the training of the network. Given a training
        set, the examples are shown to the network (possibly in a randomized
        way). A maximum number of iterations or a maximum admitted error
        should be given as a stop condition.

        :Parameters:
          train_set
            The training set is a list of examples. It can have any size and
            can contain repeated examples. In fact, the definition of the
            training set is open. Each element of the training set, however,
            should be an input vector of the correct dimensions. See the
            ``learn`` and ``feed`` methods for more information.
          imax
            The maximum number of iterations. Examples from the training set
            will be presented to the network while this limit is not
            reached. Defaults to 2000.
          emax
            The maximum admitted error. Examples from the training set will
            be presented to the network until the error obtained is lower
            than this limit. Defaults to 1e-5.
          randomize
            If this is ``True``, then the examples are shown in a randomized
            order. If ``False``, then the examples are shown in the same
            order that they appear in the ``train_set`` list. Defaults to
            ``False``.

        :Returns:
          The error obtained for the last example presented.
        '''
        i = 0
        error = 1
        s = len(train_set)
        while i<imax and error>emax:
            if randomize:
                x = random.choice(train_set)
            else:
                # Cycle over the training set in order.
                x = train_set[i%s]
            error = self.feed(x)
            i = i+1
        return error
################################################################################
class GRNN(object):
    """
    GRNN is the implementation of the General Regression Neural Network, a
    kind of probabilistic neural network used in regression tasks.
    """
    def __init__(self, sigma=0.1):
        """
        Initializes the network.

        It is not necessary to inform the training set size; GRNN infers it
        by itself in the ``train`` method.

        :Parameters:
          sigma
            A real number, the smoothness parameter: it determines the
            spread of the probability density function. A great value
            results in a large-spread gaussian where the sample points cover
            a wide range of inputs; a small value creates a limited-spread
            gaussian covering a small range of inputs.
        """
        self.sigma = sigma
        # Filled in by ``train``; ``None`` means "not trained yet".
        self._samples = None
        self._targets = None
    def _kernel(self, x1, x2):
        """
        Gives a measure of how well a training sample can represent the
        position of prediction (i.e. how well ``x1`` can represent ``x2``,
        or vice versa): a gaussian of the distance between the two points.
        It equals one when ``x1 == x2`` and decays towards zero as the
        distance grows. In the probabilistic view, this is the probability
        distribution.
        """
        diff = x1 - x2
        return exp(-dot(diff, diff) / (2.0 * self.sigma**2))
    def train(self, sampleInputs, targets):
        """
        Presents a training set to the network.

        This method uses the sample inputs to set the size of the network.

        :Parameters:
          sampleInputs
            A list of numbers or a list of ``numpy.array`` giving the sample
            inputs, used to compute the distance to prediction points.
          targets
            The target values of the sample inputs; a list of numbers.
        """
        self._samples = array(sampleInputs)
        self._targets = array(targets)
    def __call__(self, x):
        """
        Predicts the value associated with input ``x``.

        :Parameters:
          x
            The input vector to the network.

        :Returns:
          The predicted value: the kernel-weighted average of the targets.
        """
        weights = [ self._kernel(x, sample) for sample in self._samples ]
        return dot(weights, self._targets) / sum(weights)
class PNN(object):
    """
    PNN is the implementation of the Probabilistic Neural Network, a network
    used for classification tasks.
    """
    def __init__(self, sigma=0.1):
        """
        Initializes the network.

        It is not necessary to inform the training set size; PNN infers it
        by itself in the ``train`` method.

        :Parameters:
          sigma
            A real number, the smoothness parameter: it determines the
            spread of the probability density function. A great value
            results in a large-spread gaussian where the sample points cover
            a wide range of inputs; a small value creates a limited-spread
            gaussian covering a small range of inputs.
        """
        self.sigma = sigma
        # Mapping category -> list of training patterns; set by ``train``.
        self._categorys = None
    def _kernel(self, x1, x2):
        """
        Gives a measure of how well a training sample can represent the
        position of evaluation (i.e. how well ``x1`` can represent ``x2``,
        or vice versa): a gaussian of the distance between the two points.
        It equals one when ``x1 == x2`` and decays towards zero as the
        distance grows. In the probabilistic view, this is the probability
        distribution.
        """
        diff = x1 - x2
        return exp(-dot(diff, diff) / (2.0 * self.sigma**2))
    def train(self, trainSet):
        """
        Presents a training set to the network.

        This method uses the sample inputs to set the size of the network.

        :Parameters:
          trainSet
            A list of examples of any size, possibly with repetitions. Each
            element should be a two-tuple ``(x, d)``, where ``x`` is the
            input vector and ``d`` is the desired response, i.e. the
            category of the ``x`` pattern.
        """
        self._categorys = { }
        for pattern, category in trainSet:
            self._categorys.setdefault(category, [ ]).append(array(pattern))
    def __call__(self, x):
        """
        Classifies the input ``x`` into one of the trained categories.

        :Parameters:
          x
            The input vector to the network.

        :Returns:
          The category that best represents the input vector.
        """
        # Average kernel response of each category's patterns; the category
        # with the highest mean response wins.
        scores = { }
        for category, patterns in self._categorys.items():
            total = sum([ self._kernel(x, p) for p in patterns ])
            scores[category] = total / float(len(patterns))
        return max(scores, key=scores.get)
################################################################################
# Test / demo entry point: no self-test is implemented for this module.
if __name__ == "__main__":
    # Fixed: removed a stray "| Python" extraction artifact that had been
    # fused onto this line, making it a syntax error.
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/nn.py
# Basic topologies of neural networks
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Radial Basis Function Networks
This sub-package implements the basic behaviour of radial basis function
networks. This is a two-layer neural network that works as a universal function
approximator. The activation functions of the first layer are radial basis
functions (RBFs), that are symmetric around the origin, that is, the value of
this kind of function depends only on the distance of the evaluated point to the
origin. The second layer has only one neuron with linear activation, that is, it
only combines the inputs of the first layer.
The training of this kind of network, while it can be done using a traditional
optimization technique such as gradient descent, is usually made in two steps.
In the first step, the position of the centers and the width of the RBFs are
computed. In the second step, the weights of the second layer are adapted. In
this module, the RBFN architecture is implemented, allowing training of the
second layer. Centers must be supplied, but they can be easily found from the
training set using algorithms such as K-Means (the one traditionally used),
SOMs or Fuzzy C-Means.
"""
################################################################################
from numpy import array, amax, sum
from random import choice
from nnet import *
################################################################################
# Classes
class RBFN(object):
def __init__(self, c, phi=Gaussian, phi2=Linear):
'''
Initializes the radial basis function network.
A radial basis function is implemented as two layers of neurons, the
first one with the RBFs, the second one a linear combinator.
:Parameters:
c
Two-dimensional array containing the centers of the radial basis
functions, where each line is a vector with the components of the
center. Thus, the number of lines in this array is the number of
centers of the network.
phi
The radial basis function to be used in the first layer. Defaults to
the gaussian.
phi2
The activation function of the second layer. If the network is being
used to approximate functions, this should be Linear. Since this is
the most commom situation, it is the default value. In occasions,
this can be made (say) a sigmoid, for pattern recognition.
'''
self.__c = array(c)
self.__n = len(self.__c)
wmax = 0.
for ci in self.__c:
w = amax(sum((ci - self.__c)**2, axis=1))
if w > wmax:
wmax = w
self.__w = array([ sqrt(wmax) ]*self.__n) / (self.__n - 1)
self.phi = phi
self.__l = FeedForward((self.__n, 1), phi=phi2, lrule=BackPropagation)
def __getwidth(self):
return self.__w
def __setwidth(self, w):
try:
if len(w) != len(self.__c):
raise AttributeError('Width array must have the same number of componets as the number of centers')
else:
self.__w = array(w)
except TypeError:
self.__w = array([ w ]*self.__n)
width = property(__getwidth, __setwidth)
'''The computed width of the RBFs. This property can be read and written. If
a single value is written, then it is used for every center. If a vector of
values is supplied, then it must be one for each center.'''
def __getweights(self):
return self.__l[0].weights
def __setweights(self, w):
self.__l[0].weights = w
weights = property(__getweights, __setweights)
'''A ``numpy`` array containing the synaptic weights of the second layer of
the network. It is writable, but the new weight array must be the same shape
of the neuron, or an exception is raised.'''
def __gety(self):
return self.__l.y
y = property(__gety, None)
'''The activation value for the second layer of the network, ie., the answer
of the network. This property is available only after the network is fed
some input.'''
def __getphi(self):
return self.__phi
def __setphi(self, phi):
try:
issubclass(phi, RadialBasis)
phi = phi()
except TypeError:
pass
if isinstance(phi, RadialBasis):
self.__phi = phi
else:
self.__phi = RadialBasis(phi)
phi = property(__getphi, __setphi)
'''The radial basis function. It can be set with a ``RadialBasis`` instance
or a standard Python function. If a standard function is given, it must
receive a real value and return a real value that is the activation value of
the neuron. In that case, it is adjusted to work accordingly with the
internals of the layer.'''
def __getphi2(self):
return self.__phi2
def __setphi2(self, phi):
try:
issubclass(phi, Activation)
phi = phi()
except TypeError:
pass
if isinstance(phi, Activation):
self.__phi2 = phi
else:
self.__phi2 = Activation(phi)
phi2 = property(__getphi, __setphi)
'''The activation function for the second layer. It can be set with an
``Activation`` instance or a standard Python function. If a standard
function is given, it must receive a real value and return a real value that
is the activation value of the neuron. In that case, it is adjusted to work
accordingly with the internals of the layer.'''
def __call__(self, x):
    '''
    Feeds the network and returns the result.

    Call this when the answer of the network to a given input vector ``x``
    is desired. *This method has collateral effects*, so beware: after it
    runs, the ``y`` property is set with the answer of the network.

    :Parameters:
      x
        The input vector to the network.
    :Returns:
      The vector containing the answer of every neuron in the last layer,
      in the respective order.
    '''
    # Map the input through the radial basis functions, one per center.
    hidden = array([ self.__phi((x - center) / width)
                     for center, width in zip(self.__c, self.__w) ])
    return self.__l(hidden)
def learn(self, x, d):
    '''
    Applies one example of the training set to the network.

    One iteration of the learning procedure is executed for the second
    layer of the network: the example (not necessarily from a training
    set) is mapped through the radial basis functions, and the learning
    rule -- defined at network initialization; for the second layer of an
    ``RBFN`` only ``FFLearning`` learning is allowed -- is applied over the
    layer. See the ``lrules`` documentation to create new methods.

    Notice that *this method only applies the learning method!* The network
    should be fed with the same input vector before trying to learn
    anything first. Consult the ``feed`` and ``train`` methods for more
    ways to train a network.

    :Parameters:
      x
        Input vector of the example: a column vector with one component
        per input neuron.
      d
        Desired answer of the network for this input, with one component
        per neuron of the last layer.
    :Returns:
      The error obtained by the network.
    '''
    hidden = array([ self.__phi((x - center) / width)
                     for center, width in zip(self.__c, self.__w) ])
    return self.__l.learn(hidden, d)
def feed(self, x, d):
    '''
    Feeds the network and applies one example of the training set to it.
    Only the synaptic weights in the second layer of the RBFN are adapted.

    One iteration of the learning procedure is made with the neurons of the
    network: the example (not necessarily from a training set) is presented
    and the learning rule -- defined at network initialization; for the
    second layer of an ``RBFN`` only ``FFLearning`` learning is allowed --
    is applied. See the ``lrules`` documentation to create new methods.

    Notice that *this method feeds the network* before applying the
    learning rule. Feeding has collateral effects: namely, the ``y``
    property is set. Please consult the ``__call__`` interface.

    :Parameters:
      x
        Input vector of the example: a column vector with one component
        per input neuron.
      d
        Desired answer of the network for this input, with one component
        per neuron of the last layer.
    :Returns:
      The error obtained by the network.
    '''
    hidden = array([ self.__phi((x - center) / width)
                     for center, width in zip(self.__c, self.__w) ])
    return self.__l.feed(hidden, d)
def train(self, train_set, imax=2000, emax=1e-5, randomize=False):
    '''
    Presents a training set to the network.

    This method automatizes the training of the network. The examples of a
    given training set are shown to the network (possibly in a randomized
    way) until a stop condition -- a maximum number of iterations or a
    maximum admitted error -- is reached.

    :Parameters:
      train_set
        The training set: a list of examples of any size, possibly with
        repetitions. Each element must be a two-tuple ``(x, d)``, where
        ``x`` is the input vector and ``d`` the desired response for that
        input. See ``learn`` and ``feed`` for more information.
      imax
        Maximum number of iterations; examples are presented while this
        limit is not reached. Defaults to 2000.
      emax
        Maximum admitted error; examples are presented until the obtained
        error is lower than this limit. Defaults to 1e-5.
      randomize
        If ``True``, the examples are shown in a randomized order;
        otherwise they are shown in the order of ``train_set``.
        Defaults to ``False``.
    :Returns:
      The error obtained on the last presented example.
    '''
    count = 0
    error = 1
    set_size = len(train_set)
    while count < imax and error > emax:
        if randomize:
            x, d = random.choice(train_set)
        else:
            # Cycle through the training set in order.
            x, d = train_set[count % set_size]
        error = self.feed(x, d)
        count = count + 1
    return error
################################################################################
# (extraction artifact removed -- boundary between concatenated source files)
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/base.py
# Basic definitions for layers of neurons
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions for layers of neurons.
This subpackage implements the basic classes used with neural networks. A neural
network is basically implemented as a layer of neurons. To speed things up, a
layer is implemented as a array, where each line represents the weight vector
of a neuron. Further definitions and algorithms are based on this definition.
"""
################################################################################
from numpy import dot, array, reshape, vstack, ones
from numpy.random import randn
from af import Activation, Linear
# Constant 1x1 vector stacked over the real input of a biased layer (see
# ``Layer.__call__``), so the bias is handled as an ordinary weight with a
# fixed input of 1.
_BIAS = ones((1, 1), dtype=float)
"""This constant vector is defined to implement in a fast way the bias of a
neuron, as an input of value 1, stacked over the real input to the neuron."""
################################################################################
# Classes
################################################################################
class Layer(object):
    '''
    Base class for neural networks.

    A layer of neurons is represented by an array of real values, in which
    each line is the weight vector of a single neuron. If the neurons on the
    layer are biased, the first element of each weight vector is the bias
    weight, and the bias input is always valued 1. An activation function
    (see module ``af``) is associated with the layer and determines if each
    neuron is fired or not.

    In general, this class should be subclassed to build neural nets. But,
    as neural nets are very different one from the other, check carefully
    the documentation to see if the attributes, properties and methods are
    suited to your task.
    '''
    def __init__(self, shape, phi=Linear, bias=False):
        """
        Initializes the layer.

        :Parameters:
          shape
            Size of the layer: a two-tuple ``(m, n)``, where ``m`` is the
            number of neurons and ``n`` the number of inputs of each neuron.
            All neurons in the layer have the same number of inputs.
          phi
            The activation function: an ``Activation`` object (see module
            ``af``) or a standard Python function taking and returning a
            single real value. Defaults to ``Linear``.
          bias
            If ``True``, an additional weight representing the bias is added
            to each neuron. Defaults to ``False``.
        """
        m, n = shape
        if bias:
            # The bias is an extra weight per neuron, fed the constant 1.
            n = n + 1
        self.__weights = randn(m, n)
        self.__size = m
        self.__inputs = n
        # The ``phi`` property adapts plain functions; see its setter below.
        self.phi = phi
        # Activation potential and answer, set when the layer is fed input.
        self.__v = None
        self.__y = None
        self.__bias = bias
    def __getsize(self):
        return self.__size
    size = property(__getsize, None)
    '''Number of neurons in the layer. Not writable.'''
    def __getinputs(self):
        # The internal bias input, if present, is not reported.
        if self.__bias:
            return self.__inputs - 1
        else:
            return self.__inputs
    inputs = property(__getinputs, None)
    '''Number of inputs for each neuron in the layer. Not writable.'''
    def __getshape(self):
        if self.__bias:
            return (self.__size, self.__inputs - 1)
        else:
            return (self.__size, self.__inputs)
    shape = property(__getshape, None)
    '''Shape of the layer, given as a tuple ``(m, n)``: ``m`` neurons with
    ``n`` inputs each. Not writable.'''
    def __getbias(self):
        return self.__bias
    bias = property(__getbias, None)
    '''True if the neurons are biased. Not writable.'''
    def __getweights(self):
        return self.__weights
    def __setweights(self, m):
        self.__weights = array(reshape(m, self.weights.shape))
    weights = property(__getweights, __setweights)
    '''A ``numpy`` array containing the synaptic weights of the network.
    Each line is the weight vector of a neuron. It is writable, but the new
    weight array must be the same shape of the neuron, or an exception is
    raised.'''
    def __getphi(self):
        return self.__phi
    def __setphi(self, phi):
        # A class gets instantiated first; ``issubclass`` raises TypeError
        # for non-classes, in which case ``phi`` is used as given.
        try:
            issubclass(phi, Activation)
            phi = phi()
        except TypeError:
            pass
        if isinstance(phi, Activation):
            self.__phi = phi
        else:
            # Adapt a plain Python function to the layer internals.
            self.__phi = Activation(phi)
    phi = property(__getphi, __setphi)
    '''The activation function. It can be set with an ``Activation``
    instance or a standard Python function taking and returning a real
    value; plain functions are adjusted to work with the internals of the
    layer.'''
    def __getv(self):
        if self.__v is None:
            # Modernized raise syntax (valid in Python 2 and 3).
            raise ValueError('activation potential unavailable')
        else:
            return self.__v
    v = property(__getv, None)
    '''The activation potential of the neurons. Not writable, and only
    available after the layer is fed some input.'''
    def __gety(self):
        if self.__y is None:
            raise ValueError('activation unavailable')
        else:
            return self.__y
    y = property(__gety, None)
    '''The activation value of the neurons. Not writable, and only available
    after the layer is fed some input.'''
    def __getitem__(self, n):
        '''
        The ``[ ]`` get interface, forwarded to the ``weights`` property:
        returns the respective line/element of the weight array.

        :Parameters:
          n
            A slice object containing the elements referenced. Since it is
            forwarded to an array, it behaves exactly as one.
        :Returns:
          The element or elements in the referenced indices.
        '''
        return self.weights[n]
    def __setitem__(self, n, w):
        '''
        The ``[ ]`` set interface, forwarded to the ``weights`` property:
        sets the respective line/element of the weight array.

        :Parameters:
          n
            A slice object containing the elements referenced. Since it is
            forwarded to an array, it behaves exactly as one.
          w
            A value or array of values to be set in the given indices.
        '''
        self.weights[n] = w
    def __call__(self, x):
        '''
        The feedforward method of the layer.

        Call this when the answer of the layer to an input vector ``x`` is
        desired. *This method has collateral effects*, so beware: after it
        runs, the ``v`` and ``y`` properties are set with the activation
        potential and the answer of the neurons, respectively.

        :Parameters:
          x
            The input vector to the layer.
        :Returns:
          The vector containing the answer of every neuron in the layer, in
          the respective order.
        '''
        # Reshape the input as a column vector and, if the layer is biased,
        # stack the constant bias input over the real input.
        x = reshape(x, (self.inputs, 1))
        if self.__bias:
            x = vstack((_BIAS, x))
        self.__v = dot(self.weights, x)
        self.__y = self.phi(self.__v)
        return self.__y
################################################################################
# Test
if __name__ == "__main__":
    # No standalone behaviour; the module is meant to be imported. (A stray
    # ``| Python |`` extraction artifact was removed from this line.)
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/af.py
# Activation functions and base class
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Base activation functions and base class
Activation functions define if a neuron is activated or not. There are a lot of
different definitions for activation functions in the literature, and this
sub-package implements some of them. An activation function is defined by its
response and its derivative. Being conveniently defined as classes, it is
possible to define a custom derivative method.
In this package, also, there is a base class that should be subclassed if you
want to define your own activation function. This class, however, can be
instantiated with a standard Python function as an initialization parameter, and
it is adjusted to work with the internals of the package.
If the base class is instantiated, then the function should take a real number
as input, and return a real number. The response of the function determines if
the neuron is activated or not.
"""
################################################################################
from numpy import vectorize, array, where, ones, select, exp, pi, arctan, tanh, cosh, sign
import types
################################################################################
# Classes
################################################################################
class Activation(object):
    '''
    Base class for activation functions.

    A subclass should have at least three methods:

    __init__
        Configures the function (usually taking parameters that change the
        behaviour of a simple function). A subclass should call the mother
        class initialization procedure.
    __call__
        The function call: receives a *vector* of real numbers and returns a
        *vector* of real numbers. Using the capabilities of the ``numpy``
        module helps a lot.
    derivative
        The derivative of the activation function, used in the learning
        methods. If one is not provided by the subclass, the numeric
        estimate below is used (but remember to call the superclass
        ``__init__`` so that it is wired up).

    Instantiating this class directly adapts a standard Python callable to
    work with layers of neurons.
    '''
    def __init__(self, f=None, df=None):
        '''
        Initializes the activation function.

        :Parameters:
          f
            The activation function: any callable taking a real value (the
            activation potential of a neuron) and returning a real value
            (its activation). Defaults to ``None``, in which case the
            identity function is used.
          df
            The derivative of the above function, defined the same way.
            Defaults to ``None``, in which case a numeric estimate based on
            ``f`` is used.

        :Raises:
          ValueError
            If ``f`` is neither ``None`` nor callable.
        '''
        if f is None:
            # Identity mapping, returned as a float array.
            self.__f = lambda x: array(x, dtype=float)
        elif callable(f):
            # Generalized from a strict ``types.FunctionType`` check: any
            # callable (lambda, builtin, functor) is accepted and vectorized
            # so that it maps over arrays.
            self.__f = vectorize(f)
        else:
            raise ValueError('invalid function')
        if df is None:
            # ``d`` is an alias to the derivative of the function.
            self.d = self.derivative
        else:
            self.d = df
            self.derivative = df
    def __call__(self, x):
        '''
        Call interface to the object.

        Applies the activation function over a vector of activation
        potentials and returns the results.

        :Parameters:
          x
            A real number or a vector of real numbers representing the
            activation potential of a neuron or a layer of neurons.
        :Returns:
          The activation function applied over the input vector.
        '''
        return self.__f(x)
    def derivative(self, x, dx=5.0e-5):
        '''
        An estimate of the derivative of the activation function, computed
        by central differences. This is a simple estimate, but efficient
        nonetheless.

        :Parameters:
          x
            A real number or vector of real numbers representing the point
            over which the derivative is to be calculated.
          dx
            Width of the difference interval. The smaller this number is,
            the better -- but if made too small, round-off errors dominate.
            Defaults to 5e-5, which gives the best results.
        :Returns:
          The value of the derivative over the given point.
        '''
        return (self(x+dx/2.0)-self(x-dx/2.0)) / dx
################################################################################
class Threshold(Activation):
    '''
    Threshold (step) activation function.
    '''
    def __init__(self, threshold=0.0, amplitude=1.0):
        '''
        Initializes the object.

        :Parameters:
          threshold
            Inputs below this value map to 0; inputs at or above it map to
            the given ``amplitude``. Defaults to 0.0.
          amplitude
            The maximum value of the function. Defaults to 1.0.
        '''
        self.__limit = float(threshold)
        self.__amp = float(amplitude)
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          ``amplitude`` where ``x`` reaches the threshold, 0 elsewhere.
        '''
        return where(x >= self.__limit, self.__amp, 0.0)
    def derivative(self, x):
        '''
        The function derivative. Technically this function has none, but
        taking it as 1 makes it usable in learning algorithms.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          1, with the same shape as ``x``.
        '''
        try:
            return ones(x.shape)
        except AttributeError:
            # ``x`` is a plain scalar, not an array.
            return 1.0

Step = Threshold
# Alias to ``Threshold``.
################################################################################
class Linear(Activation):
    '''
    Identity activation function.
    '''
    def __init__(self):
        '''
        Initializes the function. Takes no parameters.
        '''
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          ``x`` itself, as a float array.
        '''
        return array(x, dtype=float)
    def derivative(self, x):
        '''
        The function derivative: identically 1.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          1, with the same shape as ``x``.
        '''
        try:
            return ones(x.shape)
        except AttributeError:
            # ``x`` is a plain scalar, not an array.
            return 1.0

Identity = Linear
# An alias to ``Linear``.
################################################################################
class Ramp(Activation):
    '''
    Ramp (piecewise linear) activation function.
    '''
    def __init__(self, p0=(-0.5, 0.0), p1=(0.5, 1.0)):
        '''
        Initializes the object from the two points that delimit the ramp.

        :Parameters:
          p0
            The starting point, a tuple ``(x0, y0)``: inputs below ``x0``
            map to ``y0``. Defaults to ``(-0.5, 0.0)``.
          p1
            The ending point, a tuple ``(x1, y1)``: inputs above ``x1``
            map to ``y1``. Defaults to ``(0.5, 1.0)``.
        '''
        self.__x0 = float(p0[0])
        self.__y0 = float(p0[1])
        self.__x1 = float(p1[0])
        self.__y1 = float(p1[1])
        # Slope of the linear section between the two points.
        self.__slope = (self.__y1 - self.__y0) / (self.__x1 - self.__x0)
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          ``y0`` below the ramp, ``y1`` above it, and the linear
          interpolation in between.
        '''
        return select([ x < self.__x0, x < self.__x1 ],
                      [ self.__y0,
                        self.__slope * (x - self.__x0) + self.__y0 ],
                      self.__y1)
    def derivative(self, x):
        '''
        The function derivative: the slope on the ramp, 0 elsewhere.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The derivative of the ramp at ``x``.
        '''
        return select([ x < self.__x0, x < self.__x1 ],
                      [ 0.0, self.__slope ], 0.0)
################################################################################
class Sigmoid(Activation):
    '''
    Sigmoid (logistic) activation function.
    '''
    def __init__(self, a = 1.0, x0 = 0.0):
        '''
        Initializes the object.

        :Parameters:
          a
            The slope of the function at the center ``x0``. Defaults to 1.0.
          x0
            The center of the sigmoid. Defaults to 0.0.
        '''
        self.__slope = float(a)
        self.__center = float(x0)
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The logistic function evaluated at ``x``.
        '''
        return 1.0 / (1.0 + exp(- self.__slope*(x - self.__center)))
    def derivative(self, x):
        '''
        The function derivative.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The derivative of the logistic function at ``x``.
        '''
        u = exp(-self.__slope * (x - self.__center))
        return self.__slope * u / (1 + u)**2

Logistic = Sigmoid
# An alias to ``Sigmoid``.
################################################################################
class Signum(Activation):
    '''
    Signum activation function.
    '''
    def __init__(self):
        '''
        Initializes the object. Takes no parameters.
        '''
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          -1, 0 or 1, according to the sign of each component of ``x``.
        '''
        return sign(x)
    def derivative(self, x):
        '''
        The function derivative. Technically this function has none, but
        taking it as 1 makes it usable in learning algorithms.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          1, with the same shape as ``x``.
        '''
        try:
            return ones(x.shape)
        except AttributeError:
            # ``x`` is a plain scalar, not an array.
            return 1.0
################################################################################
class ArcTan(Activation):
    '''
    Inverse tangent activation function.
    '''
    def __init__(self, a = 1.0, x0 = 0.0):
        '''
        Initializes the object.

        :Parameters:
          a
            The slope of the function at the center ``x0``. Defaults to 1.0.
          x0
            The center of the sigmoid. Defaults to 0.0.
        '''
        self.__gain = float(a)
        self.__center = float(x0)
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The scaled inverse tangent of ``x`` shifted by the center.
        '''
        return self.__gain / pi * arctan(x - self.__center)
    def derivative(self, x):
        '''
        The function derivative.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The derivative of the function at ``x``.
        '''
        return self.__gain / pi / (1.0 + (x - self.__center)**2)
################################################################################
class TanH(Activation):
    '''
    Hyperbolic tangent activation function.
    '''
    def __init__(self, a = 1.0, x0 = 0.0):
        '''
        Initializes the object.

        :Parameters:
          a
            The slope of the function at the center ``x0``. Defaults to 1.0.
          x0
            The center of the sigmoid. Defaults to 0.0.
        '''
        self.__gain = float(a)
        self.__center = float(x0)
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The scaled hyperbolic tangent of ``x`` shifted by the center.
        '''
        return self.__gain * tanh(x - self.__center)
    def derivative(self, x):
        '''
        The function derivative.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The derivative of the function at ``x``.
        '''
        return self.__gain / cosh(x - self.__center)**2
################################################################################
# Radial Basis Functions
class RadialBasis(Activation):
    '''
    Base class for radial basis functions (RBFs).

    In almost every aspect equal to the ``Activation`` class; it exists only
    so that RBFs can be told apart from monotonic activations, since RBFs
    are the functions used in Radial Basis Function Networks, in which
    monotonic activations shouldn't be used. Being symmetric about the
    origin, an RBF takes no parameters in its creation.
    '''
    pass
################################################################################
class Gaussian(RadialBasis):
    '''
    Gaussian radial basis function.
    '''
    def __init__(self):
        '''
        Initializes the object. Takes no parameters.
        '''
        self.d = self.derivative
    def __call__(self, x):
        '''
        Applies the activation function over a vector of activation
        potentials.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          ``exp(-x**2)`` evaluated componentwise.
        '''
        return exp(-x**2)
    def derivative(self, x):
        '''
        The function derivative.

        :Parameters:
          x
            A real number or a vector of real numbers (the activation
            potential of a neuron or layer of neurons).
        :Returns:
          The derivative of the Gaussian at ``x``.
        '''
        return -2.*x*exp(-x**2)
################################################################################
# Test
if __name__ == "__main__":
    # No standalone behaviour; the module is meant to be imported. (A stray
    # ``| Python |`` extraction artifact was removed from this line.)
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/mem.py
# Associative memories and Hopfield models
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Associative memories and Hopfield network model.
This sub-package implements associative memories. In associative memories,
information is recovered by supplying not an exact index (such as in their
usual counterparts), but supplying an index simmilar enough that the information
can be deduced from what is stored in its synaptic weights. There are a number
of different memories of this kind.
The most common type is the Hopfield network. A Hopfield network is a recurrent
self-associative memory. Although there are real-valued versions of the network,
the binary type is more common. In it, patterns are recovered from an initial
estimate through an iterative process.
"""
################################################################################
from numpy import zeros, eye, all
from random import randrange
from peach.nn.base import *
from peach.nn.af import *
################################################################################
# Classes
################################################################################
class Hopfield(Layer):
'''
Hopfield neural network model
A Hopfield network is a recurrent network of one layer of neurons. There
output of every neuron is conected to the inputs of every other neuron, but
not to itself. This kind of network is autoassociative, or content-based
memory. That means that, given a noisy version of a pattern stored in it,
the network is capable of, through an iterative algorithm, recover the
original pattern, removing the noise. There is a limit in the quantity of
patterns that can be stored without causing error, and if a pattern is
stored, its negated form is also stored.
This is the binary form of the Hopfield network, which is the most common
form. It implements a ``Layer`` of neurons, without bias, and with the
Signum as the activation function.
'''
def __init__(self, size, phi=Signum):
    '''
    Initializes the Hopfield network.

    The Hopfield network is implemented as a single layer of neurons,
    without bias; every neuron receives input from every neuron.

    :Parameters:
      size
        The number of neurons in the network. In a Hopfield network, the
        number of neurons is also the number of inputs in each neuron, and
        the dimensionality of the patterns to be stored and recovered.
      phi
        The activation function. Traditionally, the Hopfield network uses
        the signum function as activation; this is the default value.
    '''
    # BUG FIX: the caller-supplied ``phi`` was ignored (``Signum`` was
    # always passed through); forward the actual parameter instead.
    Layer.__init__(self, (size, 1), phi=phi, bias=False)
    self.__size = size
    # Start with no stored patterns: an all-zero weight matrix.
    self.__weights = zeros((size, size))
def __count_inputs(self):
    return self.__size
inputs = property(__count_inputs, None)
'''Number of inputs for each neuron in the layer. In a Hopfield model there
are exactly as many inputs as there are neurons. Not writable.'''
def __read_weights(self):
    return self.__weights
def __write_weights(self, m):
    # Reshape to the current weight shape; incompatible sizes raise.
    self.__weights = array(reshape(m, self.weights.shape))
weights = property(__read_weights, __write_weights)
'''A ``numpy`` array containing the synaptic weights of the network. Each
line is the weight vector of a neuron. It is writable, but the new weight
array must be the same shape of the neuron, or an exception is raised.'''
def learn(self, x):
    '''
    Applies one example of the training set to the network.

    Training a Hopfield network is not exactly an iterative procedure: the
    network usually stores a small number of patterns, and the learning
    procedure consists only in computing the synaptic weight matrix, which
    can be done in very few steps (in fact, just the number of patterns).
    This method is here for consistency with the rest of the library.

    :Parameters:
      x
        The pattern to be stored. It must be a vector with the same size as
        the network, or else an exception will be raised. The pattern can be
        of any dimensionality, but it will internally be handled as a row
        vector.
    '''
    n = self.size
    # BUG FIX: removed a stray debug statement (``print n, len(x)``) left
    # over in the body.
    x = array(x).reshape((1, n))
    # Hebbian update of the weight matrix, normalized by the network size.
    self.weights = self.weights + 1./float(n) * dot(x.T, x)
    # Zero the main diagonal: a neuron is not connected to itself.
    self.weights = self.weights * (1 - eye(n))
def train(self, train_set):
    '''
    Presents a training set to the network.

    Stores all the patterns of the training set in the weight matrix, by
    calling the ``learn`` method once for every pattern in the set.

    :Parameters:
      train_set
        A list containing all the patterns to be stored in the network.
        Each pattern is a vector of any dimensions, which are converted
        internally to a column vector.
    '''
    for pattern in train_set:
        self.learn(pattern)
def step(self, x):
    '''
    Performs a step in the recovering procedure.

    The algorithm for recovering the patterns stored in a Hopfield network
    is an iterative algorithm which goes from a starting test pattern (a
    stored pattern with noise) and recovers the noiseless version -- if
    possible. This method takes the test pattern and performs one step of
    the convergence.

    :Parameters:
      x
        The noisy pattern.

    :Returns:
      The result of one step of the convergence. This might be the same as
      the input pattern, or the pattern with one component inverted.
    '''
    x = reshape(x, (self.inputs, 1))
    # Choose one neuron at random and recompute its activation from the
    # current state of the whole network.
    neuron = randrange(self.size)
    activation = self.phi(dot(self.weights[:, neuron], x)[0])
    if activation != 0:
        x[neuron, 0] = activation
    return x
def __call__(self, x, imax=2000, eqmax=100):
    '''
    Recovers a stored pattern.

    The ``__call__`` interface should be called if a memory needs to be
    recovered from the network. Given a noisy pattern ``x``, the algorithm
    is executed until convergence or until a maximum number of iterations
    occurs. This method repeatedly calls ``step`` until a stop condition is
    reached: either the total iteration budget is exhausted, or a number of
    consecutive iterations pass with no change in the retrieved pattern.

    :Parameters:
      x
        The noisy pattern vector presented to the network.
      imax
        The maximum number of iterations the algorithm is to be repeated.
        When this number of iterations is reached, the algorithm will stop,
        whether the pattern was found or not. Defaults to 2000.
      eqmax
        The maximum number of iterations the algorithm will be repeated if
        no changes occur in the retrieval of the pattern. At each iteration
        of the algorithm, a component might change. It is considered that,
        if a number of iterations are performed and no changes are found in
        the pattern, then the algorithm converged, and it stops. Defaults
        to 100.

    :Returns:
      The vector containing the recovered pattern from the stored memories.
    '''
    iterations = 0
    unchanged = 0
    while iterations < imax and unchanged < eqmax:
        candidate = self.step(x)
        if any(candidate != x):
            # A component flipped -- restart the stability counter.
            x = candidate
            unchanged = 0
        iterations += 1
        unchanged += 1
    return x
################################################################################
# Test
if __name__ == "__main__":
    # No stand-alone self-test is provided for this module.
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/kmeans.py
# Clustering for use in radial basis functions
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
K-Means clustering algorithm
This sub-package implements the K-Means clustering algorithm. This algorithm,
given a set of points, finds a set of vectors that best represents a partition
for these points. These vectors represent the center of a cloud of points that
are nearest to them.
This algorithm is one that can be used with radial basis function (RBF) networks
to find the centers of the RBFs. Usually, a RBFN is trained in two passes --
first positioning the centers, and then computing their variances.
"""
################################################################################
from numpy import sum, argmin, array, mean, reshape
from numpy.random import standard_normal
################################################################################
# Functions
################################################################################
################################################################################
# Classifiers
# These functions classify a set of points associating them to centers according
# to a given metric. To create a classifier, the first parameter must be the set
# of points, and the second parameter must be the list of centers. No other
# parameters are needed.
def ClassByDistance(xs, c):
    '''
    Given a set of points and a list of centers, classify the points
    according to their euclidian distance to the centers.

    :Parameters:
      xs
        Set of points to be classified. They must be given as a list or
        array of one-dimensional vectors, one per line.
      c
        Set of centers. Must also be given as a list or array of
        one-dimensional vectors, one per line.

    :Returns:
      A list of classification indices. Each index is the position, in the
      parameter ``c``, of the center nearest to the corresponding point.
    '''
    # Squared euclidian distance is enough for ranking -- no need for sqrt.
    return [ argmin(sum((p - c)**2, axis=1)) for p in xs ]
################################################################################
# Clusterers
# These functions compute, from a set of points, a single vector that represents
# the cluster. To create a clusterer, the function needs only one parameter, the
# set of points to be clustered. This is given in form of a list. The function
# must return a single vector representing the cluster.
def ClusterByMean(x):
    '''
    Computes the center of a cluster as the component-wise average of the
    vectors in the input set.

    :Parameters:
      x
        Set of points to be clustered. They must be given in the form of a
        list or array of one-dimensional points.

    :Returns:
      A one-dimensional array representing the center of the cluster.
    '''
    center = mean(x, axis=0)
    return center
################################################################################
# Classes
################################################################################
class KMeans(object):
    '''
    K-Means clustering algorithm

    This class implements the known and very used K-Means clustering
    algorithm. In this algorithm, the centers of the clusters are selected
    randomly. The points on the training set are classified according to
    their closeness to the cluster centers. This changes the positions of
    the centers, which changes the classification of the points. This
    iteration is repeated until no changes occur.

    Traditional K-Means implementations classify the points in the training
    set according to the euclidian distance to the centers, and centers are
    computed as the average of the points associated to it. This is the
    default behaviour of this implementation, but it is configurable.
    Please, read below for more detail.
    '''
    def __init__(self, training_set, nclusters, classifier=ClassByDistance,
                 clusterer=ClusterByMean):
        '''
        Initializes the algorithm.

        :Parameters:
          training_set
            A list or array of vectors containing the data to be classified.
            Each of the vectors in this list *must* have the same dimension,
            or the algorithm won't behave correctly. Notice that each vector
            can be given as a tuple -- internally, everything is converted
            to arrays.
          nclusters
            The number of clusters to be found. This must be, of course,
            bigger than 1. These represent the number of centers found once
            the algorithm terminates.
          classifier
            A function that classifies each of the points in the training
            set. This function receives the training set and a list of
            centers, and classifies each of the points according to the
            given metric. Please, look at the documentation on these
            functions for more information. Its default value is
            ``ClassByDistance``, which uses euclidian distance as metric.
          clusterer
            A function that computes the center of the cluster, given a set
            of points. This function receives a list of points and returns
            the vector representing the cluster. For more information, look
            at the documentation for these functions. Its default value is
            ``ClusterByMean``, in which the cluster is represented by the
            mean value of the vectors.
        '''
        self.__nclusters = nclusters
        self.__x = array(training_set)
        # Centers start as random gaussian vectors with the same dimension
        # as the training points; they are refined by successive calls to
        # ``step``.
        self.__c = standard_normal((nclusters, len(self.__x[0])))
        self.classify = classifier
        self.cluster = clusterer
        # Initial classification of every point, used by ``__call__`` to
        # detect convergence (no change between iterations).
        self.__xc = self.classify(self.__x, self.__c)
    def __getc(self):
        # Read accessor backing the ``c`` property.
        return self.__c
    def __setc(self, c):
        # Reshape to the current center-matrix shape; numpy raises if the
        # element count doesn't match.
        self.__c = array(reshape(c, self.__c.shape))
    c = property(__getc, __setc)
    '''A ``numpy`` array containing the centers of the classes in the
    algorithm. Each line represents a center, and the number of lines is the
    number of classes. This property is read and write, but care must be
    taken when setting new centers: if the dimensions are not exactly the
    same as given in the instantiation of the class (*ie*, *C* centers of
    dimension *N*), an exception will be raised.'''
    def step(self):
        '''
        This method runs one step of the algorithm. It might be useful to
        track the changes in the parameters.

        :Returns:
          The computed centers for this iteration.
        '''
        x = self.__x
        c = self.__c
        # Re-associate every point with its nearest current center.
        xc = self.classify(x, c)
        self.__xc = xc
        cnew = [ ]
        for i in range(self.__nclusters):
            # Points currently assigned to cluster ``i``.
            xi = [ xij for xij, clij in zip(x, xc) if clij == i ]
            if xi:
                cnew.append(self.cluster(array(xi)))
            else:
                # Empty cluster: re-seed its center at a random position.
                cnew.append(standard_normal(c[i,:].shape))
        return array(cnew)
    def __call__(self, imax=20):
        '''
        The ``__call__`` interface is used to run the algorithm until
        convergence is found.

        :Parameters:
          imax
            Specifies the maximum number of iterations admitted in the
            execution of the algorithm. It defaults to 20.

        :Returns:
          An array containing, at each line, the vectors representing the
          centers of the clustered regions.
        '''
        i = 0
        xc = [ ]
        # Stop when the classification is identical between two successive
        # iterations, or when the iteration budget runs out.
        while i < imax and xc != self.__xc:
            xc = self.__xc
            self.__c = self.step()
            i = i + 1
        return self.__c
# Quick visual self-test: clusters three gaussian point clouds and plots the
# result. NOTE(review): this block uses Python 2 print statements and helpers
# from the local ``basic`` module (``start_square``, ``savefig``) --
# presumably a plotting wrapper; confirm before running.
if __name__ == "__main__":
    from random import shuffle
    from basic import *
    xs = [ ]
    # Three clouds of 7 points each around (-1, -1), (1, -1) and (0, 1).
    for i in range(7):
        xs.append(array([ -1., -1. ] + 0.1*standard_normal((2,))))
    for i in range(7):
        xs.append(array([ 1., -1. ] + 0.1*standard_normal((2,))))
    for i in range(7):
        xs.append(array([ 0., 1. ] + 0.1*standard_normal((2,))))
    #shuffle(xs)
    k = KMeans(xs, 3)
    c = k()
    print c
    xc = k.classify(xs, c)
    for xx, xxc in zip(xs, xc):
        print xx, xxc, c[xxc,:]
    xs = array(xs)
    a1 = start_square()
    a1.hold(True)
    a1.grid(True)
    for xx in xs:
        a1.scatter(xx[0], xx[1], c='black', marker='x')
    a1.scatter(c[:,0], c[:,1], c='red', marker='o')
    savefig('kmeans.png')
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/lrules.py
# Learning rules for neural networks
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Learning rules for neural networks and base classes for custom learning.
This sub-package implements learning methods commonly used with neural networks.
There are a lot of different topologies and different learning methods for each
one. It is very difficult to find a consistent framework for defining learning
methods, in consequence. This method defines some base classes that are coupled
with the neural networks that they are supposed to work with. Also, based on
these classes, some of the traditional methods are implemented.
If you want to implement a different learning method, you must subclass the
correct base class. Consult the classes below. Also, pay attention to how the
implementation is expected to behave. Since learning algorithms are usually
somewhat complex, care should be taken to make everything work accordingly.
"""
################################################################################
from numpy import ones, hstack, reshape, dot, sum, exp
# Constant bias input: a single 1.0 that is stacked on top of the real input
# vector, so the bias weight can be treated as just another synaptic weight.
_BIAS = ones((1, 1), dtype=float)
"""This constant vector is defined to implement in a fast way the bias of a
neuron, as an input of value 1, stacked over the real input to the neuron."""
################################################################################
# Classes
################################################################################
class FFLearning(object):
    '''
    Base class for learning methods of FeedForward multilayer networks.

    As a base class, this class doesn't do anything. You should subclass
    this class if you want to implement a learning method for multilayer
    networks.

    A learning method for a neural net of this kind must deal with a
    ``FeedForward`` instance. A ``FeedForward`` object is a list of
    ``Layers`` (consulting the documentation of these classes is
    important!). Each layer is a bidimensional array, where each line
    represents the synaptic weights of a single neuron. So, a multilayer
    network is actually a three-dimensional array, if you will. Usually,
    though, learning methods for this kind of net propagate some measure of
    the error from the output back to the input (the ``BackPropagation``
    method, for instance).

    A class implementing a learning method should have at least two methods:

    __init__
      The ``__init__`` method should initialize the object. It is in
      general used to configure some property of the learning algorithm,
      such as the learning rate.

    __call__
      The ``__call__`` interface is how the method should interact with the
      neural network. It should have the following signature::

        __call__(self, nn, x, d)

      where ``nn`` is the ``FeedForward`` instance to be modified *in
      loco*, ``x`` is the input vector and ``d`` is the desired response of
      the net for that particular input vector. It should return nothing.
    '''
    def __call__(self, nn, x, d):
        '''
        The ``__call__`` interface.

        Read the documentation for this class for more information. A call
        to the class should have the following parameters:

        :Parameters:
          nn
            A ``FeedForward`` neural network instance that is going to be
            modified by the learning algorithm. The modification is made
            *in loco*, that is, the synaptic weights of ``nn`` should be
            modified in place, and not returned from this function.
          x
            The input vector from the training set.
          d
            The desired response for the given input vector.

        :Raises:
          NotImplementedError, always -- subclasses must override this
          method with the actual weight-update rule.
        '''
        # Portable raise form (the old ``raise E, 'msg'`` statement is
        # Python-2-only syntax).
        raise NotImplementedError('learning rule not defined')
################################################################################
class LMS(FFLearning):
    '''
    The Least-Mean-Square (LMS) learning method.

    The LMS method is a very simple method of learning, thoroughly
    described in virtually every book about the subject. Please, consult a
    good book on neural networks for more information. This implementation
    tries to use the ``numpy`` routines as much as possible for better
    efficiency.
    '''
    def __init__(self, lrate=0.05):
        '''
        Initializes the object.

        :Parameters:
          lrate
            Learning rate to be used in the algorithm. Defaults to 0.05.
        '''
        self.lrate = lrate
        '''Learning rate used in the algorithm.'''
    def __call__(self, nn, x, d):
        '''
        The ``__call__`` interface.

        The learning implementation. Read the documentation for the base
        class for more information. A call to the class should have the
        following parameters:

        :Parameters:
          nn
            A ``FeedForward`` neural network instance that is going to be
            modified by the learning algorithm. The modification is made
            *in loco*, that is, the synaptic weights of ``nn`` should be
            modified in place, and not returned from this function.
          x
            The input vector from the training set.
          d
            The desired response for the given input vector.
        '''
        # g would be like the local error gradient for each neuron. In LMS,
        # this serves only to propagate the error.
        d = reshape(d, (nn.y.shape))
        g = d - nn.y
        # The error is backpropagated, thus the lists are inverted. To
        # combine each layer with the one that follows it, we ``zip`` them.
        for w1, w2 in zip(nn[:-1][::-1], nn[1:][::-1]):
            # xs is the input vector, transposed because of backpropagation.
            xs = w1.y.transpose()
            # Adjusts for bias: prepend the constant 1 input and strip the
            # bias column from the weights before propagating the error.
            if w2.bias:
                xs = hstack((_BIAS, xs))
                wt = w2.weights[:, 1:].transpose()
            else:
                wt = w2.weights.transpose()
            # Update synaptic weights (delta rule: lrate * error * input).
            dw = self.lrate * dot(g, xs)
            w2.weights = w2.weights + dw
            # Backpropagate the error.
            g = dot(wt, g)
        # Repeat the procedure for the first layer, which is fed by the
        # external input ``x`` instead of a previous layer's output.
        w = nn[0]
        xs = x.reshape((1, w.inputs))
        if w.bias:
            xs = hstack((_BIAS, xs))
        dw = self.lrate * dot(g, xs)
        w.weights = w.weights + dw
# Historical names for the same learning rule.
WidrowHoff = LMS
'''Alias for the LMS class'''
DeltaRule = LMS
'''Alias for the LMS class'''
################################################################################
class BackPropagation(FFLearning):
    '''
    The BackPropagation learning method.

    The backpropagation method is a very simple method of learning,
    thoroughly described in virtually every book about the subject. Please,
    consult a good book on neural networks for more information. This
    implementation tries to use the ``numpy`` routines as much as possible
    for better efficiency.
    '''
    def __init__(self, lrate=0.05):
        '''
        Initializes the object.

        :Parameters:
          lrate
            Learning rate to be used in the algorithm. Defaults to 0.05.
        '''
        self.lrate = lrate
        '''Learning rate used in the algorithm.'''
    def __call__(self, nn, x, d):
        '''
        The ``__call__`` interface.

        The learning implementation. Read the documentation for the base
        class for more information. A call to the class should have the
        following parameters:

        :Parameters:
          nn
            A ``FeedForward`` neural network instance that is going to be
            modified by the learning algorithm. The modification is made
            *in loco*, that is, the synaptic weights of ``nn`` should be
            modified in place, and not returned from this function.
          x
            The input vector from the training set.
          d
            The desired response for the given input vector.
        '''
        # g is the local error gradient for each neuron: output error
        # scaled by the derivative of the activation function.
        d = reshape(d, (nn.y.shape))
        g = (d - nn.y) * nn[-1].phi.d(nn[-1].v)
        # The error is backpropagated, thus the lists are inverted. To
        # combine each layer with the one that follows it, we ``zip`` them.
        for w1, w2 in zip(nn[:-1][::-1], nn[1:][::-1]):
            # xs is the input vector, transposed because of backpropagation.
            xs = w1.y.transpose()
            # Adjusts for bias: prepend the constant 1 input and strip the
            # bias column from the weights before propagating the error.
            if w2.bias:
                xs = hstack((_BIAS, xs))
                wt = w2.weights[:, 1:].transpose()
            else:
                wt = w2.weights.transpose()
            # Update synaptic weights.
            dw = self.lrate * dot(g, xs)
            w2.weights = w2.weights + dw
            # Backpropagate the error, rescaling by the activation
            # derivative of the upstream layer.
            g = dot(wt, g) * w1.phi.d(w1.v)
        # Repeat the procedure for the first layer, which is fed by the
        # external input ``x`` instead of a previous layer's output.
        w = nn[0]
        xs = x.reshape((1, w.inputs))
        if w.bias:
            xs = hstack((_BIAS, xs))
        dw = self.lrate * dot(g, xs)
        w.weights = w.weights + dw
################################################################################
class SOMLearning(object):
    '''
    Base class for learning methods of Self-Organizing Maps.

    As a base class, this class doesn't do anything. You should subclass
    this class if you want to implement a learning method for
    self-organizing maps.

    A learning method for a neural net of this kind must deal with a
    ``SOM`` instance. A ``SOM`` object is a ``Layer`` (consulting the
    documentation of these classes is important!).

    A class implementing a learning method should have at least two methods:

    __init__
      The ``__init__`` method should initialize the object. It is in
      general used to configure some property of the learning algorithm,
      such as the learning rate.

    __call__
      The ``__call__`` interface is how the method should interact with the
      neural network. It should have the following signature::

        __call__(self, nn, x)

      where ``nn`` is the ``SOM`` instance to be modified *in loco*, and
      ``x`` is the input vector. It should return nothing.
    '''
    def __call__(self, nn, x, d=None):
        '''
        The ``__call__`` interface.

        Read the documentation for this class for more information. A call
        to the class should have the following parameters:

        :Parameters:
          nn
            A ``SOM`` neural network instance that is going to be modified
            by the learning algorithm. The modification is made *in loco*,
            that is, the synaptic weights of ``nn`` should be modified in
            place, and not returned from this function.
          x
            The input vector from the training set.
          d
            Ignored. Unsupervised SOM rules take no desired response;
            subclasses implement ``__call__(self, nn, x)``. The parameter
            is kept optional here for backward compatibility.

        :Raises:
          NotImplementedError, always -- subclasses must override this
          method with the actual weight-update rule.
        '''
        # Portable raise form (the old ``raise E, 'msg'`` statement is
        # Python-2-only syntax).
        raise NotImplementedError('learning rule not defined')
################################################################################
class WinnerTakesAll(SOMLearning):
    '''
    Purely competitive learning method without learning rate adjust.

    A winner-takes-all strategy detects the winner on the self-organizing
    map and adjusts it in the direction of the input vector, scaled by the
    learning rate. Its tendency is to cluster around the gravity center of
    the points in the training set.
    '''
    def __init__(self, lrate=0.05):
        '''
        Initializes the object.

        :Parameters:
          lrate
            Learning rate to be used in the algorithm. Defaults to 0.05.
        '''
        self.lrate = lrate
        '''Learning rate used with the algorithm.'''
    def __call__(self, nn, x):
        '''
        The ``__call__`` interface.

        The learning implementation. Read the documentation for the base
        class for more information. A call to the class should have the
        following parameters:

        :Parameters:
          nn
            A ``SOM`` neural network instance that is going to be modified
            by the learning algorithm. The modification is made *in loco*,
            that is, the synaptic weights of ``nn`` should be modified in
            place, and not returned from this function.
          x
            The input vector from the training set.
        '''
        # ``nn.y`` holds the index of the winning neuron -- presumably set
        # by a previous feed of ``x`` through the map; confirm with callers.
        i = nn.y
        w = nn.weights
        # Move only the winner towards the input, scaled by the learning
        # rate. (An unused reshape of ``x`` was removed here.)
        dw = self.lrate * (x - w[i])
        w[i] = w[i] + dw
# Short historical name for the same rule.
WTA = WinnerTakesAll
'''Alias for the ``WinnerTakesAll`` class'''
################################################################################
class Competitive(SOMLearning):
    '''
    Competitive learning with time adjust of the learning rate.

    A competitive strategy detects the winner on the self-organizing map
    and adjusts it in the direction of the input vector, scaled by the
    learning rate. Its tendency is to cluster around the gravity center of
    the points in the training set. As time passes, the learning rate grows
    smaller, this allows for better adjustment of the synaptic weights.
    '''
    def __init__(self, lrate=0.05, tl=1000.):
        '''
        Initializes the object.

        :Parameters:
          lrate
            Learning rate to be used in the algorithm. Defaults to 0.05.
          tl
            Time constant that measures how many iterations will be needed
            to reduce the learning rate to a small value. Defaults to 1000.
        '''
        self.lrate = lrate
        '''Learning rate used in the algorithm.'''
        # Multiplicative decay factor: after ``tl`` calls the effective
        # rate has shrunk by a factor of about 1/e.
        self.__lrate = 1.0
        self.__lrm = exp(-1.0/float(tl))
    def __call__(self, nn, x):
        '''
        The ``__call__`` interface.

        The learning implementation. Read the documentation for the base
        class for more information. A call to the class should have the
        following parameters:

        :Parameters:
          nn
            A ``SOM`` neural network instance that is going to be modified
            by the learning algorithm. The modification is made *in loco*,
            that is, the synaptic weights of ``nn`` should be modified in
            place, and not returned from this function.
          x
            The input vector from the training set.
        '''
        # ``nn.y`` holds the index of the winning neuron -- presumably set
        # by a previous feed of ``x`` through the map; confirm with callers.
        i = nn.y
        w = nn.weights
        # Adjusts the learning rate according to an exponential rule.
        lrate = self.lrate * self.__lrate
        self.__lrate = self.__lrate * self.__lrm
        # Move only the winner towards the input. (An unused reshape of
        # ``x`` was removed here.)
        dw = lrate * (x - w[i])
        w[i] = w[i] + dw
################################################################################
class Cooperative(SOMLearning):
    '''
    Cooperative learning with time adjust of the learning rate and
    neighborhood function to propagate cooperation.

    A cooperative strategy detects the winner on the self-organizing map
    and adjusts it in the direction of the input vector, scaled by the
    learning rate. Its tendency is to cluster around the gravity center of
    the points in the training set. As time passes, the learning rate grows
    smaller, this allows for better adjustment of the synaptic weights.

    Also, a neighborhood is defined on the winner. Neurons close to the
    winner are also updated in the direction of the input vector, although
    with a smaller scale determined by the neighborhood function. A
    neighborhood function is 1. at 0., and decreases monotonically as the
    distance increases.
    '''
    def __init__(self, lrate=0.05, tl=1000, tn=1000, s0=1.0):
        '''
        Initializes the object.

        :Parameters:
          lrate
            Learning rate to be used in the algorithm. Defaults to 0.05.
          tl
            Time constant that measures how many iterations will be needed
            to reduce the learning rate to a small value. Defaults to 1000.
          tn
            Time constant that measures how many iterations will be needed
            to shrink the neighborhood. Defaults to 1000.
          s0
            Initial width of the gaussian neighborhood function. Defaults
            to 1.0. (This parameter was previously read from an undefined
            name, which made the class impossible to instantiate.)
        '''
        # BUG FIX: ``self.lrate`` was never set, although ``__call__``
        # needs a learning rate (it wrongly read ``nn.lrate``).
        self.lrate = lrate
        '''Learning rate used in the algorithm.'''
        self.__neighbor = 1.0
        self.__lrate = 1.0
        self.__lrm = exp(-1.0/float(tl))
        self.__nbm = exp(-1.0/float(tn))
        # BUG FIX: ``s0`` was referenced without being a parameter,
        # raising NameError on every construction.
        self.__s0 = float(s0)
    def __call__(self, nn, x):
        '''
        The ``__call__`` interface.

        The learning implementation. Read the documentation for the base
        class for more information. A call to the class should have the
        following parameters:

        :Parameters:
          nn
            A ``SOM`` neural network instance that is going to be modified
            by the learning algorithm. The modification is made *in loco*,
            that is, the synaptic weights of ``nn`` should be modified in
            place, and not returned from this function.
          x
            The input vector from the training set.
        '''
        # ``nn.y`` holds the index of the winning neuron -- presumably set
        # by a previous feed of ``x`` through the map; confirm with callers.
        i = nn.y
        w = nn.weights
        wi = nn.weights[i, :]
        # Adjusts the learning rate according to an exponential rule.
        # BUG FIX: uses ``self.lrate`` (set in ``__init__``), consistently
        # with the other SOM rules, instead of the undefined ``nn.lrate``.
        lrate = self.lrate * self.__lrate
        self.__lrate = self.__lrate * self.__lrm
        # Apply neighborhood function: a gaussian of the squared distance
        # between each neuron's weights and the winner's, with a width that
        # shrinks exponentially over time.
        s = self.__s0 * self.__neighbor
        self.__neighbor = self.__neighbor * self.__nbm
        d = sum((wi - w)**2, axis=1)
        h = exp(-d/(2*s**2))
        # Updates the weights: every neuron moves towards the input, scaled
        # by its neighborhood factor.
        dw = lrate * h * (x - w).transpose()
        w = w + dw.transpose()
        nn.weights = w
################################################################################
# Test
if __name__ == "__main__":
    # No stand-alone self-test is provided for this module.
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: nn/__init__.py
# Makes the nn directory a python package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements support for neural networks. Consult:
base
Basic definitions of the objects used with neural networks;
af
A list of activation functions for use with neurons and a base class to
implement different activation functions;
lrules
Learning rules;
nnet
Implementation of different classes of neural networks;
mem
Associative memories and Hopfield model;
kmeans
K-Means implementation for use with Radial Basis Networks;
rbfn
Radial Basis Function Networks;
"""
# __all__ = [ 'base', 'af', 'lrules', 'nnet', 'mem', 'kmeans', 'rbfn' ]
################################################################################
# Imports sub-packages
from peach.nn.base import *
from peach.nn.af import *
from peach.nn.lrules import *
from peach.nn.nnet import *
from peach.nn.mem import *
from peach.nn.kmeans import *
from peach.nn.rbfn import *
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/ga.py
# Basic genetic algorithm
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic Genetic Algorithm (GA)
This sub-package implements a traditional genetic algorithm as described in
books and papers. It consists of selecting, breeding and mutating a population
of chromosomes (arrays of bits) and reinserting the fittest individual from the
previous generation if the GA is elitist. Please, consult a good reference on
the subject, for the subject is way too complicated to be explained here.
Within the algorithm implemented here, it is possible to specify and configure
the selection, crossover and mutation methods using the classes in the
respective sub-modules and custom methods can be implemented (check
``selection``, ``crossover`` and ``mutation`` modules).
A GA object is actually a list of chromosomes. Please, refer to the
documentation of the class below for more information.
"""
################################################################################
from numpy import zeros, argmax, any, isnan, array
from numpy.random import uniform
import types
from chromosome import *
from fitness import *
from selection import *
from crossover import *
from mutation import *
################################################################################
# Classes
################################################################################
class GeneticAlgorithm(list):
'''
A standard Genetic Algorithm
This class implements the methods to generate, initialize and evolve a
population of chromosomes according to a given fitness function. A standard
GA implements, in this order:
- A selection method, to choose, from this generation, which individuals
will be present in the next generation;
- A crossover method, to exchange information between selected individuals
to add diversity to the population;
- A mutation method, to change information in a selected individual, also
to add diversity to the population;
- The reinsertion of the fittest individual, if the population is elitist
(which is almost always the case).
A population is actually a list of chromosomes, and individuals can be
read and set as in a normal list. Use the ``[ ]`` operators to access
individual chromosomes but please be aware that modifying the information on
the list before the end of convergence can cause unpredictable results. The
population and the algorithm have also other properties, check below to see
more information on them. '''
def __init__(self, f, x0, ranges=[ ], fmt='f', fitness=Fitness,
selection=RouletteWheel, crossover=TwoPoint,
mutation=BitToBit, elitist=True):
'''
Initializes the population and the algorithm.
On the initialization of the population, a lot of parameters can be set.
Those will deeply affect the results. The parameters are:
:Parameters:
f
A multivariable function to be evaluated. The nature of the
parameters in the objective function will depend of the way you want
the genetic algorithm to process. It can be a standard function that
receives a one-dimensional array of values and computes the value of
the function. In this case, the values will be passed as a tuple,
instead of an array. This is so that integer, floats and other types
of values can be passed and processed. In this case, the values will
depend of the format string (see below)
If you don't supply a format, your objective function will receive a
``Chromosome`` instance, and it is the responsibility of the
function to decode the array of bits in any way. Notice that, while
it is more flexible, it is certainly more difficult to deal with.
Your function should process the bits and compute the return value
which, in any case, should be a scalar.
Please, note that genetic algorithms maximize functions, so project
your objective function accordingly. If you want to minimize a
function, return its negated value.
x0
A population of first estimates. This is a list, array or tuple of
one-dimension arrays, each one corresponding to an estimate of the
position of the minimum. The population size of the algorithm will
be the same as the number of estimates in this list. Each component
of the vectors in this list are one of the variables in the function
to be optimized.
ranges
Since messing with the bits can change substantially the values
obtained can diverge a lot from the maximum point. To avoid this,
you can specify a range for each of the variables. ``range``
defaults to ``[ ]``, which means that no range checking will be done.
If given, then every variable will be checked. There are two ways to
specify the ranges.
It might be a tuple of two values, ``(x0, x1)``, where ``x0`` is the
start of the interval, and ``x1`` its end. Obviously, ``x0`` should
be smaller than ``x1``. If ``range`` is given in this way, then this
range will be used for every variable.
It can be specified as a list of tuples with the same format as
given above. In that case, the list must have one range for every
variable specified in the format and the ranges must appear in the
same order as there. That is, every variable must have a range
associated to it.
fmt
A ``struct``-format string. The ``struct`` module is a standard
Python module that packs and unpacks informations in bits. These
are used to inform the algorithm what types of data are to be used.
For example, if you are maximizing a function of three real
variables, the format should be something like ``"fff"``. Any type
supported by the ``struct`` module can be used. The GA will decode
the bit array according to this format and send it as is to your
fitness function -- your function *must* know what to do with them.
Alternatively, the format can be an integer. In that case, the GA
will not try to decode the bit sequence. Instead, the bits are
passed without modification to the objective function, which must
deal with them. Notice that, if this is used this way, the
``ranges`` property (see below) makes no sense, so it is set to
``None``. Also, no sanity checks will be performed.
It defaults to `"f"`, that is, a single floating point variable.
fitness
A fitness method to be applied over the objective function. This
parameter must be a ``Fitness`` instance or subclass. It will be
applied over the objective function to compute the fitness of every
individual in the population. Please, see the documentation on the
``Fitness`` class.
selection
This specifies the selection method. You can use one given in the
``selection`` sub-module, or you can implement your own. In any
case, the ``selection`` parameter must be an instance of
``Selection`` or of a subclass. Please, see the documentation on the
``selection`` module for more information. Defaults to
``RouletteWheel``. If made ``None``, then selection will not be
present in the GA.
crossover
This specifies the crossover method. You can use one given in the
``crossover`` sub-module, or you can implement your own. In any
case, the ``crossover`` parameter must be an instance of
``Crossover`` or of a subclass. Please, see the documentation on the
``crossover`` module for more information. Defaults to
``TwoPoint``. If made ``None``, then crossover will not be
present in the GA.
mutation
This specifies the mutation method. You can use one given in the
``mutation`` sub-module, or you can implement your own. In any
case, the ``mutation`` parameter must be an instance of ``Mutation``
or of a subclass. Please, see the documentation on the ``mutation``
module for more information. Defaults to ``BitToBit``. If made
``None``, then mutation will not be present in the GA.
elitist
Defines if the population is elitist or not. An elitist population
will never discard the fittest individual when a new generation is
computed. Defaults to ``True``.
'''
list.__init__(self, [ ])
self.__fx = [ ]
for x in x0:
x = array(x).ravel()
c = Chromosome(fmt)
c.encode(tuple(x))
self.append(c)
self.__fx.append(f(x))
self.__f = f
self.__csize = self[0].size
self.elitist = elitist
'''If ``True``, then the population is elitist.'''
if type(fmt) == int:
self.ranges = None
elif ranges is None:
self.ranges = zip(amin(self, axis=0), amax(self, axis=1))
else:
ranges = list(ranges)
if len(ranges) == 1:
self.ranges = array(ranges * len(x0[0]))
else:
self.ranges = array(ranges)
'''Holds the ranges for every variable. Although it is a
writable property, care should be taken in changing parameters
before ending the convergence.'''
# Sanitizes the first estimate. It is not expected that the values
# received as first estimates are outside the ranges, but a check is
# made anyway. If any estimate is outside the bounds, a new random
# vector is choosen.
if self.ranges is not None: self.sanity()
# Verifies the validity of the fitness method
try:
issubclass(fitness, Fitness)
fitness = fitness()
except TypeError:
pass
if not isinstance(fitness, Fitness):
raise TypeError, 'not a valid fitness function'
else:
self.__fit = fitness
self.__fitness = self.__fit(self.__fx)
# Verifies the validity of the selection method
try:
issubclass(selection, Selection)
selection = selection()
except TypeError:
pass
if not isinstance(selection, Selection):
raise TypeError, 'not a valid selection method'
else:
self.__select = selection
# Verifies the validity of the crossover method
try:
issubclass(crossover, Crossover)
crossover = crossover()
except TypeError:
pass
if not isinstance(crossover, Crossover) and crossover is not None:
raise TypeError, 'not a valid crossover method'
else:
self.__crossover = crossover
# Verifies the validity of the mutation method
try:
issubclass(mutation, Mutation)
mutation = mutation()
except TypeError:
pass
if not isinstance(mutation, Mutation) and mutation is not None:
raise TypeError, 'not a valid mutation method'
else:
self.__mutate = mutation
def __get_csize(self):
return self.__csize
chromosome_size = property(__get_csize, None)
'''This property hold the chromosome size for the population. Not
writable.'''
def __get_fx(self):
return self.__fx
fx = property(__get_fx, None)
'''Array containing the fitness value for every estimate in the
population. Not writeable.'''
def __get_best(self):
m = argmax(self.__fx)
return self[m]
best = property(__get_best, None)
'''Single vector containing the position of the best point found by all the
individuals. Not writeable.'''
def __get_fbest(self):
m = argmax(self.__fx)
return self.__fx[m]
fbest = property(__get_fbest, None)
'''Single scalar value containing the function value of the best point by
all the individuals. Not writeable.'''
def __get_fit(self):
return self.__fitness
fitness = property(__get_fit, None)
'''Vector containing the fitness value for every individual in the
population. This is not the same as the objective function value. Not
writeable.'''
def sanity(self):
'''
Sanitizes the chromosomes in the population.
Since not every individual generated by the crossover and mutation
operations might be a valid result, this method verifies if they are
inside the allowed ranges (or if it is a number at all). Each invalid
individual is discarded and a new one is generated.
This method has no parameters and returns no values.
'''
r = self.ranges
x0 = r[:, 0]
x1 = r[:, 1]
for c in self:
xs = c.decode()
if any(xs < x0) or any(xs > x1) or any(isnan(xs)):
xs = [ uniform(r0, r1) for r0, r1 in r ]
c.encode(tuple(xs))
def restart(self, x0):
'''
Resets the optimizer, allowing the use of a new set of estimates. This
can be used to avoid stagnation.
:Parameters:
x0
A new set of estimates. It doesn't need to have the same size of the
original population, but it must be a list of estimates in the same
format as in the object instantiation. Please, see the documentation
on the instantiation of the class.
'''
self.__fx = [ ]
for x in x0:
x = array(x).ravel()
c = Chromosome(fmt)
c.encode(tuple(x))
self.append(c)
self.__fx.append(f(x))
    def step(self):
        '''
        Computes a new generation of the population, a step of the adaptation.

        This method goes through all the steps of the GA, as described above. If
        the selection, crossover and mutation operators are defined, they are
        applied over the population. If the population is elitist, then the
        fittest individual of the past generation is reinserted.

        :Returns:
          A tuple ``(best, fbest)`` with the fittest individual of the new
          generation and its objective function value. The GA itself can also
          be consulted (using ``[ ]``) to find the fittest individual which is
          the result of the process.
        '''
        f = self.__f
        # Save a copy of the current champion before the stochastic operators
        # run, so it can be reinserted afterwards (elitism).
        if self.elitist:
            max_fit = Chromosome(self.best)
        self.__select(self)
        # Crossover and mutation are optional; ``None`` disables them.
        if self.__crossover is not None: self.__crossover(self)
        if self.__mutate is not None: self.__mutate(self)
        # Regenerate out-of-range individuals (only when ranges are known).
        if self.ranges is not None: self.sanity()
        # Elitism: the saved champion unconditionally replaces slot 0.
        if self.elitist:
            self[0] = max_fit
        # Re-evaluate the objective function and fitness of the new generation.
        self.__fx = [ f(c.decode()) for c in self ]
        self.__fitness = self.__fit(self.__fx)
        return self.best, self.fbest
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method, so
        the object is called as a function. This method returns a tuple
        ``(x, e)``, with the best estimate of the minimum and the error.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        e = emax
        i = 0
        # NOTE(review): the loop stops once the best objective value drops to
        # *half* the error threshold -- confirm ``emax/2.`` is intentional and
        # not simply ``emax``. Also note ``x`` is only bound inside the loop,
        # so a nonpositive ``emax`` or ``imax < 1`` would raise NameError at
        # the return below.
        while e > emax/2. and i < imax:
            x, e = self.step()
            i = i + 1
        return x, e
class GA(GeneticAlgorithm):
    '''
    ``GA`` is a short convenience alias for the ``GeneticAlgorithm`` class.
    '''
    pass
################################################################################
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/mutation.py
# Basic definitions for mutation on chromosomes
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions and classes for operating mutation on chromosomes.
The mutation operator changes selected bits in the array corresponding to the
chromosome. This operation is not as common as the others, but some genetic
algorithms still implement it.
"""
################################################################################
from numpy.random import uniform
################################################################################
# Classes
################################################################################
class Mutation(object):
    '''
    Base class for mutation operators.

    Subclass this class to create your own mutation operator. The base class
    itself is only a prototype. As with every base class in this library, use
    the ``__init__`` method to configure the mutation behaviour -- if needed --
    and the ``__call__`` method to operate over a population.

    A class derived from this one should implement at least 2 methods, defined
    below:

      __init__(self, *cnf, **kw)
        Initializes the object. There are no mandatory arguments, but any
        parameters can be used here to configure the operator. For example, a
        class can define a mutation rate -- this should be defined here::

          __init__(self, rate=0.75)

        A default value should always be offered, if possible.

      __call__(self, population)
        The ``__call__`` implementation should receive a population, operate
        over it and return the processed population. Please, consult the ``ga``
        module to see more information on populations. No recommendation on
        the internals of the method is made.

    Please note that the GA implementation relies on this behaviour: it will
    pass a population to your ``__call__`` method and expects to receive the
    result back.
    '''
    pass
################################################################################
class BitToBit(Mutation):
    '''
    A simple bit-to-bit mutation operator.

    This operator scans every individual in the population, in a bit-to-bit
    fashion. If a uniformly random number is less than the mutation rate (see
    below), then the bit is inverted. The mutation rate should be made very
    small, since large populations will represent a big number of bits; it
    should never be more than 0.5.
    '''
    def __init__(self, rate=0.05):
        '''
        Initialize the mutation operator.

        :Parameters:
          rate
            Probability that a single bit in an individual will be inverted.
        '''
        # Mutation rate: the per-bit probability of inversion.
        self.rate = rate

    def __call__(self, population):
        '''
        Applies the operator over a population.

        The behaviour of this operator is as described above: it scans every bit
        in every individual, and if a random number is less than the mutation
        rate, the bit is inverted.

        :Parameters:
          population
            A list of ``Chromosomes`` containing the present population of the
            algorithm. It is processed and the results of the mutation are
            returned to the caller.

        :Returns:
          The processed population, a list of ``Chromosomes``.
        '''
        rate = self.rate
        for c in population:
            for j in range(population.chromosome_size):
                if uniform(0., 1.) < rate:
                    # BUG FIX: the previous ``c[j] = ~c[j]`` took the bitwise
                    # complement of a bool (~True == -2, ~False == -1), both
                    # truthy, so every "mutated" bit was set to 1 instead of
                    # being flipped. ``not`` performs the intended inversion.
                    c[j] = not c[j]
        return population
################################################################################
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/selection.py
# Basic definitions for selection of chromosomes
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic classes and definitions for selection operator.
The first step in a genetic algorithm is the selection of the fittest
individuals. The selection method typically uses the fitness of the population
to compute which individuals are closer to the best solution. However, instead
of deterministically deciding which individuals continue to the next generation,
they are randomly chosen, with the chances of an individual being chosen given
by its fitness value. This sub-module implements selection methods.
"""
################################################################################
from numpy import add
from numpy.random import uniform
from random import randrange
from chromosome import *
################################################################################
# Classes
################################################################################
class Selection(object):
    '''
    Base class for selection operators.

    Subclass this class to create your own selection operator. The base class
    itself is only a prototype. As with every base class in this library, use
    the ``__init__`` method to configure the selection behaviour -- if needed
    -- and the ``__call__`` method to operate over a population.

    A class derived from this one should implement at least 2 methods, defined
    below:

      __init__(self, *cnf, **kw)
        Initializes the object. There are no mandatory arguments, but any
        parameters can be used here to configure the operator. A default value
        should always be offered, if possible.

      __call__(self, population)
        The ``__call__`` implementation should receive a population, operate
        over it and return the processed population. Please, consult the ``ga``
        module to see more information on populations. No recommendation on
        the internals of the method is made.

    Please note that the GA implementation relies on this behaviour: it will
    pass a population to your ``__call__`` method and expects to receive the
    result back.
    '''
    pass
################################################################################
class RouletteWheel(Selection):
    '''
    The Roulette Wheel selection method.

    This method randomly chooses a new population with the same size of the
    original population. An individual is chosen with a probability
    proportional to its fitness value, independent of what fitness method was
    used. This is usually abstracted as a roulette wheel in texts about the
    subject. Please, note that the selection is done *in loco*, that is,
    although the new population is returned, it is not a new list -- it is the
    same list as before, but with values changed.
    '''
    def __call__(self, population):
        '''
        Selects the population.

        :Parameters:
          population
            The list of chromosomes that should be operated over. The given list
            is modified, so be aware that the old generation will not be
            available after stepping the GA.

        :Returns:
          The new population.
        '''
        facc = add.accumulate(population.fitness)
        # BUG FIX: fitness values are expected to add up to 1, but floating
        # point round-off can leave the last accumulated value slightly below
        # the drawn random number, running the scan below past the end of the
        # array (IndexError). Normalizing pins the last entry to exactly 1,
        # which is > any draw in [0, 1).
        facc = facc / facc[-1]
        newp = [ ]
        for j in range(len(population)):
            rs = uniform(0., 1.)
            si = 0
            while rs > facc[si]:
                si = si + 1
            newp.append(Chromosome(population[si]))
        population[:] = newp
        return population
################################################################################
class BinaryTournament(Selection):
    '''
    The Binary Tournament selection method.

    This method randomly chooses a new population with the same size of the
    original population. Two individuals are chosen at random and they
    "battle", the fittest surviving for the next generation. Please, note that
    the selection is done *in loco*, that is, although the new population is
    returned, it is not a new list -- it is the same list as before, but with
    values changed.
    '''
    def __call__(self, population):
        '''
        Selects the population.

        :Parameters:
          population
            The list of chromosomes that should be operated over. The given list
            is modified, so be aware that the old generation will not be
            available after stepping the GA.

        :Returns:
          The new population.
        '''
        # BUG FIX: the previous version compared *accumulated* fitness values
        # (add.accumulate), which are nondecreasing in the index, so the
        # individual with the larger index always won the tournament
        # regardless of its fitness. Compare the raw fitness values instead.
        fit = population.fitness
        newp = [ ]
        l = len(population)
        for j in range(l):
            m = randrange(l)
            n = randrange(l)
            if fit[m] > fit[n]:
                newp.append(Chromosome(population[m]))
            else:
                newp.append(Chromosome(population[n]))
        population[:] = newp
        return population
################################################################################
class Baker(Selection):
    '''
    The Baker selection method.

    This method is very similar to the Roulette Wheel, but instead of randomly
    choosing every new member of the next generation, only the first pointer
    is randomized. The others are determined as equally spaced numbers from 0
    to 1, starting from that number. Please, note that the selection is done
    *in loco*, that is, although the new population is returned, it is not a
    new list -- it is the same list as before, but with values changed.
    '''
    def __call__(self, population):
        '''
        Selects the population.

        :Parameters:
          population
            The list of chromosomes that should be operated over. The given list
            is modified, so be aware that the old generation will not be
            available after stepping the GA.

        :Returns:
          The new population.
        '''
        facc = add.accumulate(population.fitness)
        # BUG FIX: if floating point round-off left the accumulated total
        # below the current pointer ``cs``, the inner ``while`` below would
        # spin forever (no accumulated value ever reaches ``cs``, and ``si``
        # just wraps around). Normalizing pins the last entry to exactly 1,
        # which is >= any pointer value in [0, 1), guaranteeing termination.
        facc = facc / facc[-1]
        newp = [ ]
        cs = uniform(0., 1.)
        si = 0
        l = len(population)
        l1 = 1. / l
        for j in range(l):
            while facc[si] < cs:
                si = (si + 1) % l
            newp.append(Chromosome(population[si]))
            # Advance the pointer by 1/l, wrapping around the unit interval.
            cs = (cs + l1) % 1
        population[:] = newp
        return population
################################################################################
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/chromosome.py
# Basic definitions for manipulating chromosomes
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions and classes for manipulating chromosomes
This sub-package is a vital part of the genetic algorithms framework within the
module. This uses the ``bitarray`` module to implement a chromosome as an array
of bits. It is, thus, necessary that this module is installed in your Python
system. Please, check within the Python website how to install the ``bitarray``
module.
The class defined in this module is derived from ``bitarray`` and can also be
derived if needed. In general, users or programmers won't need to instantiate
this class directly -- it is manipulated by the genetic algorithm itself. Check the
class definition for more information.
"""
################################################################################
from bitarray import bitarray
import struct
import types
################################################################################
# Classes
################################################################################
class Chromosome(bitarray):
    '''
    Implements a chromosome as a bit array.

    Data is structured according to the ``struct`` module that exists in the
    Python standard library. Internally, data used in optimization with a
    genetic algorithm are represented as arrays of bits, so the ``bitarray``
    module must be installed. Please consult the Python package index for more
    information on how to install ``bitarray``. In general, the user don't need
    to worry about how the data is manipulated internally, but a specification
    of the format as in the ``struct`` module is needed.

    If the internal format of the data is specified as an ``struct`` format, the
    genetic algorithm will take care of encoding and decoding data from and to
    the optimizer. However, it is possible to specify, instead of a format, the
    length of the chromosome. In that case, the fitness function must deal with
    the encoding and decoding of the information. It is strongly suggested that
    you use ``struct`` format strings, as they are much easier. This second
    option is provided as a convenience.

    The ``Chromosome`` class is derived from the ``bitarray`` class. So, every
    property and method of this class should be accessible.
    '''
    def __new__(cls, fmt='', endian='little'):
        '''
        Allocates new memory space for the chromosome

        This function overrides the ``bitarray.__new__`` function to deal with
        the length of the chromosome. It should never be directly used, as it is
        automatically called by the Python interpreter in the moment of object
        creation.

        :Parameters:
          fmt
            A ``struct`` format string, an integer bit length, or an existing
            ``bitarray`` to copy from.
          endian
            Bit endianness. NOTE(review): accepted but never forwarded to
            ``bitarray.__new__`` -- confirm whether it should be passed on.

        :Returns:
          A new ``Chromosome`` object.
        '''
        if type(fmt) == int:
            # A plain bit length: allocate that many bits.
            return bitarray.__new__(cls, fmt)
        elif type(fmt) == str:
            # A struct format string: 8 bits per packed byte.
            size = struct.calcsize(fmt) * 8
            return bitarray.__new__(cls, size)
        elif isinstance(fmt, bitarray):
            # Copy construction from another bitarray/Chromosome.
            return bitarray.__new__(cls, fmt)
        # NOTE(review): any other ``fmt`` type falls through and implicitly
        # returns None, so instantiation silently fails -- confirm intended.
    def __init__(self, fmt=''):
        '''
        Initializes the chromosome.

        This method is automatically called by the Python interpreter and
        initializes the data in the chromosome. No data should be provided to be
        encoded in the chromosome, as it is usually better start with random
        estimates. This method, in particular, does not clear the memory used in
        the time of creation of the ``bitarray`` from which a ``Chromosome``
        derives -- so the random noise in the memory is used as initial value.

        :Parameters:
          fmt
            This parameter can be passed in two different ways. If ``fmt`` is a
            string, then it is assumed to be a ``struct``-format string. Its
            size is calculated and a ``bitarray`` of the corresponding size is
            created. Please, consult the ``struct`` documentation, since what is
            explained there is exactly what is used here. For example, if you
            are going to use the optimizer to deal with three-dimensional
            vectors of continuous variables, the format would be something
            like::

              fmt = 'fff'

            If ``fmt``, however, is an integer, then a ``bitarray`` of the given
            length is created. Note that, in this case, no format is given to
            the chromosome, and it is responsability of the programmer and the
            fitness function to provide for it.

            Default value is an empty string.
        '''
        if type(fmt) == int:
            # Raw bit string: remember the length; no struct format available.
            self.__size = fmt
            self.format = None
        elif type(fmt) == str:
            # struct-formatted chromosome: the bit size was fixed by __new__.
            self.__size = len(self)
            self.format = fmt
        elif isinstance(fmt, bitarray):
            # Copying: inherit the size and format of the source.
            # NOTE(review): a plain ``bitarray`` has no ``format`` attribute,
            # so copying from anything but a Chromosome raises AttributeError.
            self.__size = len(self)
            self.format = fmt.format
        '''Property that contains the chromosome ``struct`` format.'''
    def __get_size(self):
        # Accessor for the cached bit length.
        return self.__size
    size = property(__get_size, None)
    '''Property that returns the chromosome size. Not writable.'''
    def decode(self):
        '''
        This method decodes the information given in the chromosome.

        Data in the chromosome is encoded as a ``struct``-formated string in a
        ``bitarray`` object. This method decodes the information and returns the
        encoded values. If a format string is not given, then it is assumed that
        this chromosome is just an array of bits, which is returned.

        :Returns:
          A tuple containing the decoded values, in the order specified by the
          format string, or the raw bit array when no format is available.
        '''
        if self.format is None:
            return self
        # NOTE(review): ``tostring`` was removed in newer bitarray releases in
        # favour of ``tobytes`` -- confirm the pinned bitarray version.
        return struct.unpack(self.format, self.tostring())
    def encode(self, values):
        '''
        This method encodes the information into the chromosome.

        Data in the chromosome is encoded as a ``struct``-formated string in a
        ``bitarray`` object. This method encodes the given information in the
        bitarray. If a format string is not given, this method raises a
        ``TypeError`` exception.

        :Parameters:
          values
            A tuple containing the values to be encoded in an order consistent
            with the given ``struct``-format.

        :Raises:
          TypeError when the chromosome has no ``struct`` format.
        '''
        if self.format is None:
            raise TypeError, 'no encoding/decoding format available'
        # Pack the values into bytes, then overwrite this bit array in place.
        tmp = bitarray()
        tmp.fromstring(struct.pack(self.format, *values))
        self[:] = tmp
################################################################################
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/fitness.py
# Basic definitions for declaring fitness functions
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions and base classes for definition of fitness functions for use
with genetic algorithms.
Fitness is a function that rates higher the chromosomes that perform better
according to the objective function. For example, if the minimum of a function
needs to be found, then the fitness function should rate better the chromosomes
that correspond to lower values of the objective function. This module gives
support to use common Python functions as fitness functions in genetic
algorithms.
The classes defined in this sub-module take a function and use some algorithm to
rank a population. There are some different ranking functions, some are provided
in this module. There is also a class that can be subclassed to generate other
fitness methods. See the documentation of the corresponding class for more
information.
"""
################################################################################
from numpy import min, sum, argsort, zeros
################################################################################
# Classes
################################################################################
class Fitness(object):
    '''
    Base class for fitness function classifiers.

    This class is used as the base of all fitness functions. However, even if
    it is intended to be used as a base class, it also provides some
    functionality, described below.

    A subclass of this class should implement at least 2 methods:

      __init__(self, *args, **kwargs)
        Initialization method. The initialization procedure doesn't need to take
        any parameters, but if any configuration must be done, it should be
        passed as an argument to the ``__init__`` function. The genetic
        algorithm, however, does not expect parameters in the instantiation, so
        you should provide sensible defaults.

      __call__(self, fx)
        This method is called to calculate population fitness. There is no
        recommendation about the internals of the method, but its signature is
        expected as defined above. This method receives the values of the
        objective function applied over a population -- please, consult the
        ``ga`` module for more information on populations -- and should return a
        vector or list with the fitness value for each chromosome in the same
        order that they appear in the population.

    This class implements the standard normalization fitness, as described in
    every book and article about GAs. The rank given to a chromosome is
    proportional to its objective function value.
    '''
    def __init__(self):
        '''
        Initializes the operator.
        '''
        pass

    def __call__(self, fx):
        '''
        Calculates the fitness for all individuals in the population.

        :Parameters:
          fx
            The values of the objective function for every individual on the
            population to be processed. This method calculates the fitness
            according to the traditional normalization technique.

        :Returns:
          A vector containing the fitness value for every individual in the
          population, in the same order that they appear there.
        '''
        fx = fx - min(fx)          # shift so the worst individual sits at 0
        total = sum(fx)
        # BUG FIX: when every individual has the same objective value, the
        # shifted vector is all zeros and the old code divided 0/0, producing
        # NaN fitness for a fully converged population. Give every individual
        # the same (uniform) fitness instead.
        if total == 0.:
            return zeros(len(fx)) + 1. / len(fx)
        return fx / total
################################################################################
class Ranking(Fitness):
    '''
    Ranking fitness for a population

    Ranking gives fitness values equally spaced between 0 and 1. The fittest
    individual receives fitness equals to 1, the second best equals to 1 - 1/N,
    the third best 1 - 2/N, and so on, where N is the size of the population.

    It is important to note that the worst fit individual receives a fitness
    value of 1/N, not 0. That allows that no individuals are excluded from the
    selection operator.
    '''
    def __init__(self):
        '''
        Initializes the operator.
        '''
        Fitness.__init__(self)

    def __call__(self, fx):
        '''
        Calculates the fitness for all individuals in the population.

        :Parameters:
          fx
            The values of the objective function for every individual on the
            population to be processed. This method calculates the fitness
            according to the equally spaced ranking technique.

        :Returns:
          A vector containing the fitness value for every individual in the
          population, in the same order that they appear there.
        '''
        fx = fx - min(fx)
        # BUG FIX: ``argsort(fx)`` yields the *indices* that would sort the
        # array, not the rank of each element, so the old code assigned the
        # equally spaced fitness values to the wrong individuals. The rank of
        # element ``i`` is obtained by argsorting twice.
        ranks = argsort(argsort(fx)) + 1.
        fx = ranks / len(fx)
        # Normalize so the fitness values add up to 1.
        return fx / sum(fx)
################################################################################ | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/crossover.py
# Basic definitions for crossover among chromosomes
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions for crossover operations and base classes.
Crossover is a very basic and important operation in genetic algorithms. It is
by means of crossover among the chromosomes that population gains diversity,
thus exploring more completelly the solution space and giving better answers.
This sub-module provides definitions of the most common crossover operations,
and provides a class that can be subclassed to construct different types of
crossover for experimentation.
"""
################################################################################
from numpy.random import uniform
from random import randrange
################################################################################
# Classes
################################################################################
class Crossover(object):
    '''
    Base class for crossover operators.

    Subclass this class to create your own crossover operator. The base class
    itself is only a prototype. As with every base class in this library, use
    the ``__init__`` method to configure the crossover behaviour -- if needed
    -- and the ``__call__`` method to operate over a population.

    A class derived from this one should implement at least 2 methods, defined
    below:

      __init__(self, *cnf, **kw)
        Initializes the object. There are no mandatory arguments, but any
        parameters can be used here to configure the operator. For example, a
        class can define a crossover rate -- this should be defined here::

          __init__(self, rate=0.75)

        A default value should always be offered, if possible.

      __call__(self, population)
        The ``__call__`` implementation should receive a population, operate
        over it and return the processed population. Please, consult the ``ga``
        module to see more information on populations. No recommendation on
        the internals of the method is made. That being said, in general a
        crossover operator pairs chromosomes and swaps bits among them (but
        there is nothing to say that you can't do it differently).

    Please note that the GA implementation relies on this behaviour: it will
    pass a population to your ``__call__`` method and expects to receive the
    result back.
    '''
    pass
################################################################################
class OnePoint(Crossover):
    '''
    A one-point crossover operator.

    A one-point crossover randomly selects a single point in two chromosomes and
    swaps the bits among them from that point until the end of the bit stream.
    The crossover rate is the probability that two paired chromosomes will
    exchange bits.
    '''
    def __init__(self, rate=0.75):
        '''
        Initialize the crossover operator.

        :Parameters:
          rate
            Probability that two paired chromosomes will exchange bits.
        '''
        # Crossover rate: the per-pair probability of exchanging a tail.
        self.rate = rate

    def __call__(self, population):
        '''
        Proceeds the crossover over a population.

        In one-point crossover, chromosomes from a population are paired. If a
        uniform random number is below the ``rate`` given in the instantiation
        of the operator, then a random point is selected and bits from that
        point until the end of the chromosomes are exchanged.

        :Parameters:
          population
            A list of ``Chromosomes`` containing the present population of the
            algorithm. It is processed and the results of the exchange are
            returned to the caller.

        :Returns:
          The processed population, a list of ``Chromosomes``.
        '''
        rate = self.rate
        chromosize = population.chromosome_size
        size = len(population)
        # BUG FIX: the previous loop, ``xrange(int((size-1)/2)*2, 2)``, built
        # an empty range for any population with three or more individuals
        # (start >= stop with the default step of 1), so crossover silently
        # never happened -- and could index out of bounds for size 2. Pair
        # the individuals (0,1), (2,3), ... instead.
        for j in range(0, size - 1, 2):
            if uniform(0., 1.) <= rate:
                pos = randrange(chromosize)
                tmp = population[j][pos:]
                population[j][pos:] = population[j+1][pos:]
                population[j+1][pos:] = tmp
        return population
################################################################################
class TwoPoint(Crossover):
    '''
    A two-point crossover operator.

    A two-point crossover randomly selects two points in two chromosomes and
    swaps the bits among them between these points. The crossover rate is the
    probability that two paired chromosomes will exchange bits.
    '''
    def __init__(self, rate=0.75):
        '''
        Initialize the crossover operator.

        :Parameters:
          rate
            Probability that two paired chromosomes will exchange bits.
        '''
        # Crossover rate: the per-pair probability of exchanging a segment.
        self.rate = rate

    def __call__(self, population):
        '''
        Proceeds the crossover over a population.

        In two-point crossover, chromosomes from a population are paired. If a
        uniform random number is below the ``rate`` given in the instantiation
        of the operator, then two random points are selected and the bits
        between them are exchanged.

        :Parameters:
          population
            A list of ``Chromosomes`` containing the present population of the
            algorithm. It is processed and the results of the exchange are
            returned to the caller.

        :Returns:
          The processed population, a list of ``Chromosomes``.
        '''
        rate = self.rate
        chromosize = population.chromosome_size
        size = len(population)
        # BUG FIX: the previous loop, ``xrange(int((size-1)/2)*2, 2)``, built
        # an empty range for any population with three or more individuals
        # (start >= stop with the default step of 1), so crossover silently
        # never happened. Pair the individuals (0,1), (2,3), ... instead.
        for j in range(0, size - 1, 2):
            if uniform(0., 1.) <= rate:
                ipos = randrange(chromosize)
                epos = randrange(chromosize)
                if epos < ipos:
                    ipos, epos = epos, ipos
                tmp = population[j][ipos:epos]
                population[j][ipos:epos] = population[j+1][ipos:epos]
                population[j+1][ipos:epos] = tmp
        return population
################################################################################
class Uniform(Crossover):
    '''
    A uniform crossover operator.

    A uniform crossover scans two chromosomes in a bit-to-bit fashion. According
    to a given crossover rate, the corresponding bits are exchanged. The
    crossover rate is the probability that two bits will be exchanged.
    '''
    def __init__(self, rate=0.75):
        '''
        Initialize the crossover operator.

        :Parameters:
          rate
            Probability that bits from two paired chromosomes will be exchanged.
        '''
        # Crossover rate: the per-bit probability of exchange.
        self.rate = rate

    def __call__(self, population):
        '''
        Proceeds the crossover over a population.

        In uniform crossover, chromosomes from a population are paired and
        scanned in a bit-to-bit fashion. If a uniform random number is below
        the ``rate`` given in the instantiation of the operator, then the bits
        under scan are exchanged between the chromosomes.

        :Parameters:
          population
            A list of ``Chromosomes`` containing the present population of the
            algorithm. It is processed and the results of the exchange are
            returned to the caller.

        :Returns:
          The processed population, a list of ``Chromosomes``.
        '''
        rate = self.rate
        chromosize = population.chromosome_size
        size = len(population)
        # BUG FIX: the previous loop, ``xrange(int((size-1)/2)*2, 2)``, built
        # an empty range for any population with three or more individuals
        # (start >= stop with the default step of 1), so crossover silently
        # never happened. Pair the individuals (0,1), (2,3), ... instead.
        for j in range(0, size - 1, 2):
            for k in range(chromosize):
                if uniform(0., 1.) <= rate:
                    tmp = population[j][k]
                    population[j][k] = population[j+1][k]
                    population[j+1][k] = tmp
        return population
################################################################################
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: ga/__init__.py
# Makes the ga directory a python package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements genetic algorithms. Consult:
base
Implementation of the basic genetic algorithm;
chromosome
Basic definitions to work with chromosomes. Defined as arrays of bits;
crossover
Defines crossover operators and base classes;
fitness
Defines fitness functions and base classes;
mutation
Defines mutation operators and base classes;
selection
Defines selection operators and base classes;
"""
# __all__ = [ 'base', 'chromosome', 'crossover', 'fitness', 'mutation', 'selection' ]
################################################################################
# Imports sub-packages
from peach.ga.base import *
from peach.ga.chromosome import *
from peach.ga.crossover import *
from peach.ga.fitness import *
from peach.ga.mutation import *
from peach.ga.selection import *
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: pso/base.py
# Basic particle swarm optimization
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements the simple continuous version of the particle swarm
optimizer. In this implementation, it is possible to specify, besides the
objective function and the first estimates, the ranges of search, which will
influence the max velocity of the particles, and the population size. Other
parameters are available too, please refer to the rest of this documentation for
further details.
"""
from numpy import array, argmin, amin, amax, where
from numpy.random import random, uniform
from acc import *
################################################################################
# Classes
################################################################################
class ParticleSwarmOptimizer(list):
'''
A standard Particle Swarm Optimizer
This class implements a particle swarm optimization (PSO) procedure. A
swarm is a list of estimates, and should answer to every ``list`` method. A
population of particles is created to travel through the search domain with
a certain velocity. At each point, the objective function is evaluated for
each particle, and the positions are adjusted correspondingly. The velocity
is then modified (ie, the particles are accelerated) towards its 'personal'
best (the best value found by that particle at the moment) and a global best
(the best value found overall at the moment).
'''
def __init__(self, f, x0, ranges=None, accelerator=StandardPSO, emax=1e-5, imax=1000):
'''
Initializes the optimizer.
:Parameters:
f
A multivariable function to be evaluated. It must receive only one
parameter, a multidimensional line-vector with the same dimensions
of the range list (see below) and return a real value, a scalar.
x0
A population of first estimates. This is a list, array or tuple of
one-dimension arrays, each one corresponding to an estimate of the
position of the minimum. The population size of the algorithm will
be the same as the number of estimates in this list. Each component
of the vectors in this list are one of the variables in the function
to be optimized.
ranges
A range of values might be passed to the algorithm, but it is not
necessary. If this parameter is not supplied, then the ranges will
be computed from the estimates, but be aware that this might not
represent the complete search space. If supplied, this parameter
should be a list of ranges for each variable of the objective
function. It is specified as a list of tuples of two values,
``(x0, x1)``, where ``x0`` is the start of the interval, and ``x1``
its end. Obviously, ``x0`` should be smaller than ``x1``. It can
also be given as a list with a simple tuple in the same format. In
that case, the same range will be applied for every variable in the
optimization.
accelerator
An acceleration method, please consult the documentation on ``acc``
module. Defaults to StandardPSO, that is, velocities change based on
local and global bests.
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
list.__init__(self, [ ])
self.__fx = [ ]
for x in x0:
x = array(x).ravel()
self.append(x)
self.__fx.append(f(x))
self.__f = f
# Determine ranges of the variables
if ranges is None:
ranges = zip(amin(self, axis=0), amax(self, axis=1))
else:
ranges = list(ranges)
if len(ranges) == 1:
ranges = array(ranges * len(x0[0]))
else:
ranges = array(ranges)
self.ranges = ranges
'''Holds the ranges for every variable. Although it is a writable
property, care should be taken in changing parameters before ending the
convergence.'''
# Randomly computes the initial velocities
s = len(self)
d = len(x0[0])
r = self.ranges
self.__v = (random((s, d)) - 0.5) * (r[:, 1] - r[:, 0])/10.
# Verifies the validity of the acceleration method
try:
issubclass(accelerator, Accelerator)
accelerator = accelerator(self)
except TypeError:
pass
if not isinstance(accelerator, Accelerator):
raise TypeError, 'not a valid acceleration method'
else:
self.__acc = accelerator
self.__emax = emax
self.__imax = imax
def __get_fx(self):
return self.__fx
fx = property(__get_fx, None)
'''Array containing the objective function values for each estimate in the
swarm.'''
def __get_best(self):
m = argmin(self.__fx)
return self[m]
best = property(__get_best, None)
'''Single vector containing the position of the best point found by all the
particles. Not writeable.'''
def __get_fbest(self):
m = argmin(self.__fx)
return self.__fx[m]
fbest = property(__get_fbest, None)
'''Single scalar value containing the function value of the best point by
all the particles. Not writeable.'''
def restart(self, x0):
'''
Resets the optimizer, allowing the use of a new set of estimates. This
can be used to avoid stagnation
:Parameters:
x0
A new set of estimates. It doesn't need to have the same size of the
original swarm, but it must be a list of estimates in the same
format as in the object instantiation. Please, see the documentation
on the instantiation of the class. New velocities will be computed.
'''
self[:] = [ ]
self.__fx = [ ]
f = self.__f
for x in x0:
x = array(x).ravel()
self.append(x)
self.__fx.append(f(x))
# Randomly computes the initial velocities
s = len(self)
d = len(x0[0])
r = self.ranges
self.__v = (random((s, d)) - 0.5) * (r[:, 1] - r[:, 0])/10.
def step(self):
'''
Computes the new positions of the particles, a step of the algorithm.
This method updates the velocity given the constants associated with the
particle and global bests; and then updates the positions accordingly.
This method has no parameters and returns no values. The particles
positions can be consulted with the ``[]`` interface (as a swarm of
particles is a list of estimates), ``best`` property, to find the global
best, and ``fbest`` property to find the minimum (see above).
'''
oldbest = self.best
f = self.__f
p = array(self)
v = self.__acc(self.__v)
# Next estimates
p = p + v
# Sanity check
if self.ranges is not None:
r0 = self.ranges[:, 0]
r1 = self.ranges[:, 1]
p = where(p < r0, uniform(r0, r1, p.shape), p)
p = where(p > r1, uniform(r0, r1, p.shape), p)
# Update state
self.__v = v
self[:] = list(p)
for i in xrange(len(self)):
self.__fx[i] = f(p[i])
best = self.best
return best, abs(best - oldbest)/best
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
e = emax
i = 0
while e > emax/2. and i < imax:
x, e = self.step()
i = i + 1
return x, e
class PSO(ParticleSwarmOptimizer):
    '''
    ``PSO`` is a short alias to the ``ParticleSwarmOptimizer`` class. It adds
    no behaviour of its own; consult that class for the full documentation.
    '''
    pass
################################################################################
# Test
if __name__ == "__main__":
    # Quick smoke test: minimize the banana-shaped (Rosenbrock-like) function
    # below with a swarm of 5 particles started at random points in
    # [0, 2) x [0, 2).
    def f(xy):
        # Receives a single 2-vector (x, y) and returns a scalar value.
        x, y = xy
        return (1-x)**2 + (y-x*x)**2
    i = 0
    x0 = random((5, 2))*2
    #p = ParticleSwarmOptimizer(f, x0, [ (0., 2.), (0., 2.) ])
    p = ParticleSwarmOptimizer(f, x0)
    # Iterate until the best function value is close enough to the minimum,
    # printing the swarm state at every step.
    while p.fbest > 5e-7:
        print p
        print p.best
        print p.fbest
        p.step()
        i = i + 1
        print '-'*50
print i, p.best, p.fbest | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: pso/acc.py
# Functions to update the velocity of particles in a swarm.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Functions to update the velocity (ie, accelerate) of the particles in a swarm.
Acceleration of a particle is an important concept in the theory of particle
swarm optimizers. By choosing an adequate acceleration, particle velocity is
changed so that they can search the domain of definition of the objective
function such that there is a greater probability that a global minimum is
found. Since particle swarm optimizers are derived from genetic algorithms, it
can be said that this is what creates diversity in a swarm, such that the space
is more thoroughly searched.
"""
################################################################################
from numpy import array, sqrt, abs, select, sign
from numpy.random import random
################################################################################
# Classes
################################################################################
class Accelerator(object):
    '''
    Base class for accelerators.
    An accelerator computes new velocities for the particles of a swarm. To
    implement a custom accelerator, derive this class and provide at least the
    two methods below:
    __init__(self, *cnf, **kw)
        Configures the operator. No arguments are mandatory; any parameter
        the accelerator needs (for instance, a variance used to randomly
        perturb the acceleration) should be received and stored here, with a
        sensible default whenever possible, e.g.::
            __init__(self, variance=1.0)
    __call__(self, v):
        Receives a sequence of velocities -- one ``n``-dimensional vector per
        particle -- and returns the updated velocities. Note that the
        operation is over the whole collection of velocities, not a single
        one. As a convenience, this base class can wrap a plain per-particle
        function so that it works over the full collection.
    '''
    def __init__(self, f):
        '''
        Initializes an accelerator object.
        Wraps a simple per-particle acceleration function so that it can be
        applied over a whole swarm.
        :Parameters:
          f
            The function to be used as acceleration. It receives a single
            ``n``-dimensional velocity vector (``n`` being the dimensionality
            of the objective function) and returns the accelerated velocity.
            The wrapper applies it to every particle's velocity in turn.
        '''
        self.__f = f
    def __call__(self, v):
        '''
        Computes new velocities for every particle.
        Override this method in derived accelerators. It takes the velocities
        of all particles -- as a list, or as a vector with one
        ``n``-dimensional velocity per line -- and produces the updated
        velocity for each of them by applying the wrapped function.
        :Parameters:
          v
            A list or a vector of velocities, one velocity per line of the
            vector or per element of the list.
        :Returns:
          A vector of the same size as the argument with the updated
          velocities, given as a bidimensional array.
        '''
        accelerate = self.__f
        updated = [ ]
        for velocity in list(v):
            updated.append(accelerate(velocity))
        return array(updated)
################################################################################
class StandardPSO(Accelerator):
    '''
    Standard PSO Accelerator
    This class implements a method for changing the velocities of particles in
    a particle swarm. The standard way is to retain information on local bests
    and the global bests, and update the velocity based on that.
    '''
    def __init__(self, ps, vmax=None, cp=2.05, cg=2.05):
        '''
        Initializes the accelerator.
        :Parameters:
          ps
            A reference to the Particle Swarm that should be updated. This
            class, in instantiation, will assume that the position of the
            particles in the moment of creation are the local best. The
            objective function is computed for all particles, and the values
            saved for reference in the future. Also, at the same time, the
            global best is computed.
          vmax
            Maximum absolute velocity allowed for the particles, per variable.
            If ``None`` (the default), it is taken as 15% of the search range
            of each variable. A scalar value is applied to every variable.
          cp
            The velocity adjustment constant associated with the particle best
            values. Defaults to 2.05.
          cg
            The velocity adjustment constant associated with the global best
            values. Defaults to 2.05. The defaults in the ``cp`` and ``cg``
            parameters are such that the inertia weight in the constriction
            method satisfies ``cp + cg > 4``. Please, look in the bibliography
            for more information.
        '''
        self.__ps = ps
        # The creation-time positions are taken as the initial local bests;
        # the swarm's best point is the initial global best.
        self.__pbest = ps[:]
        self.__fpbest = ps.fx[:]
        self.__gbest = ps.best
        self.__fgbest = ps.fbest
        if vmax is None:
            # Default maximum velocity: 15% of the search range of each
            # variable. (The previous code assigned this value
            # unconditionally *after* the if/else, silently clobbering any
            # user-supplied ``vmax`` -- and masking a reference to the
            # nonexistent ``ps.x`` attribute in the scalar branch.)
            self.__vmax = 0.15 * (ps.ranges[:, 1] - ps.ranges[:, 0])
        else:
            vmax = array(vmax).ravel()
            if len(vmax) == 1:
                # Expand a scalar limit over every variable of the problem.
                self.__vmax = vmax.repeat(len(ps[0]))
            else:
                self.__vmax = vmax
        self.cp = cp
        '''Velocity adjustment constant associated with the particle best values.'''
        self.cg = cg
        '''Velocity adjustment constant associated with the global best values.'''
        # Constriction coefficient computed from cp + cg (> 4 required).
        phi = cp + cg
        self.__k = 2./abs(2. - phi - sqrt((phi - 4.)*phi))
    def __call__(self, v):
        '''
        Computes the new velocities for every particle in the swarm. This method
        receives the velocities as a list or a vector of the velocities (a
        ``n``-dimensional vector in each line) or each particle in a swarm and
        computes, for each one of them, a new velocity.
        :Parameters:
          v
            A list or a vector of velocities, where each velocity is one line of
            the vector or one element of the list.
        :Returns:
          A vector of the same size as the argument with the updated velocities.
          The returned vector is returned as a bidimensional array.
        '''
        ps = self.__ps
        fx = ps.fx[:]
        # Updates local and global best.
        for i in xrange(len(fx)):
            if fx[i] < self.__fpbest[i]:
                self.__pbest[i] = ps[i]
                self.__fpbest[i] = fx[i]
            if fx[i] < self.__fgbest:
                self.__gbest = ps[i]
                self.__fgbest = fx[i]
        # Updates speed: constricted pull towards the personal and global
        # bests, with uniformly random weights on each component.
        ps = array(ps)
        s = ps.shape
        v = self.__k * (v + self.cp * random(s) * (self.__pbest - ps) \
                          + self.cg * random(s) * (self.__gbest - ps))
        # Clamp the velocity magnitude in every dimension. (The previous test,
        # ``v < vmax``, limited only the positive direction, letting
        # arbitrarily large negative velocities through unclamped.)
        vmax = self.__vmax
        if vmax is not None:
            v = select( [ abs(v) <= vmax ], [ v ], sign(v)*vmax )
        return v
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: pso/__init__.py
# Makes the pso directory a package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic Particle Swarm Optimization (PSO)
This sub-package implements traditional particle swarm optimizers as described
in literature. It consists of a very simple algorithm emulating the behaviour
of a flock of birds (though in a very simplified way). A population of particles
is created, each particle with its corresponding velocity. They fly towards the
particle local best and the swarm global best, thus exploring the whole domain.
For consistency purposes, the particles are represented internally as a list of
vectors. The particles can be accessed externally by using the ``[ ]`` interface.
See the rest of the documentation for more information.
"""
# __all__ = [ 'base', 'acc' ]
################################################################################
# Imports sub-packages
from peach.pso.base import * # Basic definitions
from peach.pso.acc import * # Acceleration of particles
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/fuzzy.py
# Fuzzy logic basic definitions
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements basic definitions for fuzzy logic
"""
################################################################################
import numpy
import types
import norms
################################################################################
# Classes
################################################################################
class FuzzySet(numpy.ndarray):
    '''
    Array containing fuzzy values for a set.
    This class defines the behavior of a fuzzy set. It is an array of values in
    the range from 0 to 1, and the basic operations of the logic -- and (using
    the ``&`` operator); or (using the ``|`` operator); not (using ``~``
    operator) -- can be defined according to a set of norms. The norms can be
    redefined using the appropriated methods.
    To create a FuzzySet, instantiate this class with a sequence as argument,
    for example::
        fuzzy_set = FuzzySet([ 0., 0.25, 0.5, 0.75, 1.0 ])
    '''
    # Default logic operations: Zadeh norms from the ``norms`` module. These
    # are class-level, so changing them affects every FuzzySet instance.
    __AND__ = norms.ZadehAnd
    'Class variable to hold the *and* method'
    __OR__ = norms.ZadehOr
    'Class variable to hold the *or* method'
    __NOT__ = norms.ZadehNot
    'Class variable to hold the *not* method'
    def __new__(cls, data):
        '''
        Allocates space for the array.
        A fuzzy set is derived from the basic NumPy array, so the appropriate
        functions and methods are called to allocate the space. In theory, the
        values for a fuzzy set should be in the range ``0.0 <= x <= 1.0``, but
        to increase efficiency, no verification is made.
        :Returns:
            A new array object with the fuzzy set definitions.
        '''
        # Coerce the input to a float ndarray, build an ndarray view of the
        # same buffer typed as this class, and return an owning copy of it.
        data = numpy.array(data, dtype=float)
        shape = data.shape
        data = numpy.ndarray.__new__(cls, shape=shape, buffer=data,
                                     dtype=float, order=False)
        return data.copy()
    def __init__(self, data=[]):
        '''
        Initializes the object.
        Operations are defaulted to Zadeh norms ``(max, min, 1-x)``
        '''
        # All real initialization happens in __new__; nothing to do here.
        pass
    def __and__(self, a):
        '''
        Fuzzy and (``&``) operation.
        '''
        return FuzzySet(FuzzySet.__AND__(self, a))
    def __or__(self, a):
        '''
        Fuzzy or (``|``) operation.
        '''
        return FuzzySet(FuzzySet.__OR__(self, a))
    def __invert__(self):
        '''
        Fuzzy not (``~``) operation.
        '''
        return FuzzySet(FuzzySet.__NOT__(self))
    @classmethod
    def set_norm(cls, f):
        '''
        Selects a t-norm (and operation)
        Use this method to change the behaviour of the and operation.
        :Parameters:
          f
            A function of two parameters which must return the ``and`` of the
            values.
        '''
        # Accept an already-vectorized callable as-is; wrap a plain Python
        # function with numpy.vectorize so it operates elementwise.
        if isinstance(f, numpy.vectorize):
            cls.__AND__ = f
        elif isinstance(f, types.FunctionType):
            cls.__AND__ = numpy.vectorize(f)
        else:
            raise ValueError, 'invalid function'
    @classmethod
    def set_conorm(cls, f):
        '''
        Selects a t-conorm (or operation)
        Use this method to change the behaviour of the or operation.
        :Parameters:
          f
            A function of two parameters which must return the ``or`` of the
            values.
        '''
        if isinstance(f, numpy.vectorize):
            cls.__OR__ = f
        elif isinstance(f, types.FunctionType):
            cls.__OR__ = numpy.vectorize(f)
        else:
            raise ValueError, 'invalid function'
    @classmethod
    def set_negation(cls, f):
        '''
        Selects a negation (not operation)
        Use this method to change the behaviour of the not operation.
        :Parameters:
          f
            A function of one parameter which must return the ``not`` of the
            value.
        '''
        if isinstance(f, numpy.vectorize):
            cls.__NOT__ = f
        elif isinstance(f, types.FunctionType):
            cls.__NOT__ = numpy.vectorize(f)
        else:
            raise ValueError, 'invalid function'
################################################################################
# Test
if __name__ == "__main__":
pass | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/control.py
# Fuzzy based controllers, or fuzzy inference systems
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements fuzzy controllers, or fuzzy inference systems.
There are two types of controllers implemented in this package. The Mamdani
controller is the traditional approach, where input (or controlled) variables
are fuzzified, a set of decision rules determine the outcome in a fuzzified way,
and a defuzzification method is applied to obtain the numerical result.
The Sugeno controller operates in a similar way, but there is no defuzzification
step. Instead, the value of the output (or manipulated) variable is determined
by parametric models, and the final result is determined by a weighted average
based on the decision rules. This type of controller is also known as parametric
controller.
"""
################################################################################
import numpy
from numpy import zeros, array, dot
import types
from base import *
from mf import *
from norms import *
from defuzzy import *
################################################################################
# Basic Mamdani controller
################################################################################
class Controller(object):
'''
Basic Mamdani controller
This class implements a standard Mamdani controller. A controller based on
fuzzy logic has a somewhat complex behaviour, so it is not explained here.
There are numerous references that can be consulted.
It is essential to understand the format that decision rules must follow to
obtain correct behaviour of the controller. A rule is a tuple given by::
((mx0, mx1, ..., mxn), my)
where ``mx0`` is a membership function of the first input variable, ``mx1``
is a membership function of the second input variable and so on; and ``my``
is a membership function or a fuzzy set of the output variable.
Notice that ``mx``'s are *functions* not fuzzy sets! They will be applied to
the values of the input variables given in the function call, so, if they
are anything different from a membership function, an exception will be
raised. Please, consult the examples to see how they must be used.
'''
def __init__(self, yrange, rules=[], defuzzy=Centroid,
norm=ZadehAnd, conorm=ZadehOr, negation=ZadehNot,
imply=MamdaniImplication, aglutinate=MamdaniAglutination):
'''
Creates and initialize the controller.
:Parameters:
yrange
The range of the output variable. This must be given as a set of
points belonging to the interval where the output variable is
defined, not only the start and end points. It is strongly suggested
that the interval is divided in some (eg.: 100) points equally
spaced;
rules
The set of decision rules, as defined above. If none is given, an
empty set of rules is assumed;
defuzzy
The defuzzification method to be used. If none is given, the
Centroid method is used;
norm
The norm (``and`` operation) to be used. Defaults to Zadeh and.
conorm
The conorm (``or`` operation) to be used. Defaults to Zadeh or.
negation
The negation (``not`` operation) to be used. Defaults to Zadeh not.
imply
The implication method to be used. Defaults to Mamdani implication.\
aglutinate
The aglutination method to be used. Defaults to Mamdani
aglutination.
'''
self.__y = yrange
self.__rules = [ ]
if isinstance(rules, list):
for r in rules:
self.add_rule(r)
self.defuzzify = defuzzy
self.__AND__ = norm
self.__OR__ = conorm
self.__NOT__ = negation
self.__IMP__ = imply
self.__AGL__ = aglutinate
def __gety(self):
return self.__y
y = property(__gety, None)
'''Property that returns the output variable interval. Not writable'''
def __getrules(self):
return self.__rules[:]
rules = property(__getrules, None)
'''Property that returns the list of decision rules. Not writable'''
def set_norm(self, f):
'''
Sets the norm (``and``) to be used.
This method must be used to change the behavior of the ``and`` operation
of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the ``and`` result.
'''
if isinstance(f, numpy.vectorize):
self.__AND__ = f
elif isinstance(f, types.FunctionType):
self.__AND__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_conorm(self, f):
'''
Sets the conorm (``or``) to be used.
This method must be used to change the behavior of the ``or`` operation
of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the ``or`` result.
'''
if isinstance(f, numpy.vectorize):
self.__OR__ = f
elif isinstance(f, types.FunctionType):
self.__OR__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_negation(self, f):
'''
Sets the negation (``not``) to be used.
This method must be used to change the behavior of the ``not`` operation
of the controller.
:Parameters:
f
The function can be any function that takes one numerical value and
return one numerical value, that corresponds to the ``not`` result.
'''
if isinstance(f, numpy.vectorize):
self.__NOT__ = f
elif isinstance(f, types.FunctionType):
self.__NOT__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_implication(self, f):
'''
Sets the implication to be used.
This method must be used to change the behavior of the implication
operation of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the implication
result.
'''
if isinstance(f, numpy.vectorize):
self.__IMP__ = f
elif isinstance(f, types.FunctionType):
self.__IMP__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def set_aglutination(self, f):
'''
Sets the aglutination to be used.
This method must be used to change the behavior of the aglutination
operation of the controller.
:Parameters:
f
The function can be any function that takes two numerical values and
return one numerical value, that corresponds to the aglutination
result.
'''
if isinstance(f, numpy.vectorize):
self.__AGL__ = f
elif isinstance(f, types.FunctionType):
self.__AGL__ = numpy.vectorize(f)
else:
raise ValueError, 'invalid function'
def add_rule(self, rule):
'''
Adds a decision rule to the knowledge base.
It is essential to understand the format that decision rules must follow
to obtain correct behaviour of the controller. A rule is a tuple must
have the following format::
((mx0, mx1, ..., mxn), my)
where ``mx0`` is a membership function of the first input variable,
``mx1`` is a membership function of the second input variable and so on;
and ``my`` is a membership function or a fuzzy set of the output
variable.
Notice that ``mx``'s are *functions* not fuzzy sets! They will be
applied to the values of the input variables given in the function call,
so, if they are anything different from a membership function, an
exception will be raised when the controller is used. Please, consult
the examples to see how they must be used.
'''
mx, my = rule
for m in mx:
if not (isinstance(m, Membership) or m is None):
raise ValueError, 'condition not a membership function'
if isinstance(my, Membership):
rule = (mx, my(self.__y))
elif not isinstance(my, FuzzySet):
raise ValueError, 'consequent not a fuzzy set or membership function'
self.__rules.append(rule)
def add_table(self, lx1, lx2, table):
'''
Adds a table of decision rules in a two variable controller.
Typically, fuzzy controllers are used to control two variables. In that
case, the set of decision rules are given in the form of a table, since
that is a more compact format and very easy to visualize. This is a
convenience function that allows to add decision rules in the form of a
table. Notice that the resulting knowledge base will be the same if this
function is used or the ``add_rule`` method is used with every single
rule. The second method is in general easier to read in a script, so
consider well.
:Parameters:
lx1
The set of membership functions to the variable ``x1``, or the
lines of the table
lx2
The set of membership functions to the variable ``x2``, or the
columns of the table
table
The consequent of the rule where the condition is the line ``and``
the column. These can be the membership functions or fuzzy sets.
'''
for i in range(len(lx1)):
for j in range(len(lx2)):
my = table[i][j]
if my is not None:
self.add_rule(((lx1[i], lx2[j]), my))
def eval(self, r, xs):
'''
Evaluates one decision rule in this controller
Takes a rule from the controller and evaluates it given the values of
the input variables.
:Parameters:
r
The rule in the standard format, or an integer number. If ``r`` is
an integer, then the ``r`` th rule in the knowledge base will be
evaluated.
xs
A tuple, a list or an array containing the values of the input
variables. The dimension must be coherent with the given rule.
:Returns:
This method evaluates each membership function in the rule for each
given value, and ``and`` 's the results to obtain the condition. If
the condition is zero, a tuple ``(0.0, None) is returned. Otherwise,
the condition is ``imply`` ed in the membership function of the output
variable. A tuple containing ``(condition, imply)`` (the membership
value associated to the condition and the result of the implication)
is returned.
'''
if type(r) is types.IntType:
r = self.__rules[r]
mx, my = r
# Finds the membership value for each xn
cl = [ m(x) for m, x in zip(mx, xs) if m is not None ]
# Apply the ``and`` operation
mr = reduce(lambda x0, x1: self.__AND__(x0, x1), cl)
# Implication, unnecessary if mr == 0
if mr == 0.0:
return (0.0, None)
else:
return (mr, self.__IMP__(mr, my))
def eval_all(self, *xs):
'''
Evaluates all the rules and aglutinates the results.
Given the values of the input variables, evaluate and apply every rule
in the knowledge base (with the ``eval`` method) and aglutinates the
results.
:Parameters:
xs
A tuple, a list or an array with the values of the input variables.
:Returns:
A fuzzy set containing the result of the evaluation of every rule in
the knowledge base, with the results aglutinated.
'''
ry = FuzzySet(zeros(self.__y.shape))
for r in self.__rules:
mr, iy = self.eval(r, xs)
if mr != 0.0:
ry = self.__AGL__(ry, iy)
return ry
def __call__(self, *xs):
    '''
    Apply the controller to the set of input variables.

    Given the values of the input variables, evaluates every decision
    rule, aglutinates the results and defuzzifies them. Returns the
    response of the controller.

    :Parameters:
      xs
        A tuple, a list or an array with the values of the input variables.

    :Returns:
      The response of the controller.
    '''
    aglutinated = self.eval_all(*xs)
    return self.defuzzify(aglutinated, self.__y)
class Mamdani(Controller):
    '''``Mamdani`` is an alias to ``Controller``'''
    pass
################################################################################
# Basic Takagi-Sugeno controller
################################################################################
class Parametric(object):
    '''
    Basic Parametric controller

    This class implements a standard parametric (or Takagi-Sugeno)
    controller. A controller based on fuzzy logic has a somewhat complex
    behaviour, so it is not explained here. There are numerous references
    that can be consulted.

    It is essential to understand the format that decision rules must
    follow to obtain correct behaviour of the controller. A rule is a tuple
    given by::

        ((mx0, mx1, ..., mxn), (a0, a1, ..., an))

    where ``mx0`` is a membership function of the first input variable,
    ``mx1`` is a membership function of the second input variable and so
    on; and ``a0`` is the linear parameter, ``a1`` is the parameter
    associated with the first input variable, ``a2`` is the parameter
    associated with the second input variable and so on. The response to
    the rule is calculated by::

        y = a0 + a1*x1 + a2*x2 + ... + an*xn

    Notice that ``mx``'s are *functions* not fuzzy sets! They will be
    applied to the values of the input variables given in the function
    call, so, if they are anything different from a membership function, an
    exception will be raised. Please, consult the examples to see how they
    must be used.
    '''
    def __init__(self, rules=None, norm=ProbabilisticAnd,
                 conorm=ProbabilisticOr, negation=ProbabilisticNot):
        '''
        Creates and initializes the controller.

        :Parameters:
          rules
            Sequence containing the decision rules for the controller. If
            not given, an empty set of decision rules is used. (``None``
            is used as the default instead of a mutable ``[]``, which
            would be shared between all instances.)
          norm
            The norm (``and`` operation) to be used. Defaults to
            probabilistic and.
          conorm
            The conorm (``or`` operation) to be used. Defaults to
            probabilistic or.
          negation
            The negation (``not`` operation) to be used. Defaults to
            probabilistic not.
        '''
        self.__rules = [ ]
        if rules is not None:
            for r in rules:
                # BUG FIX: the original code called ``self.add_rules(r)``,
                # a method that does not exist on this class -- the method
                # defined below is ``add_rule`` -- so initializing with a
                # non-empty rule list always raised AttributeError.
                self.add_rule(r)
        self.__AND__ = norm
        self.__OR__ = conorm
        self.__NOT__ = negation

    def __getrules(self):
        # Return a shallow copy so callers cannot mutate the knowledge
        # base through the property.
        return self.__rules[:]
    rules = property(__getrules, None)
    '''Property that returns the list of decision rules. Not writable'''

    def add_rule(self, rule):
        '''
        Adds a decision rule to the knowledge base.

        The rule must be a tuple ``((mx0, ..., mxn), (a0, ..., an))``,
        where each ``mx`` is a ``Membership`` function (or ``None`` for a
        don't-care condition) over the corresponding input variable, and
        the ``a``'s are the linear parameters of the rule's response. See
        the class documentation for a complete description.

        :Raises ValueError:
          If any condition is neither a ``Membership`` instance nor
          ``None``.
        '''
        mx, a = rule
        for m in mx:
            if not (isinstance(m, Membership) or m is None):
                # Parenthesized raise: valid on Python 2 and 3 alike (the
                # original comma form is a syntax error on Python 3).
                raise ValueError('condition not a membership function')
        a = array(a, dtype=float)
        self.__rules.append((mx, a))

    def eval(self, r, xs):
        '''
        Evaluates one decision rule in this controller.

        :Parameters:
          r
            The rule in the standard format, or an integer number. If
            ``r`` is an integer, then the ``r`` th rule in the knowledge
            base will be evaluated.
          xs
            A tuple, a list or an array containing the values of the input
            variables. The dimension must be coherent with the given rule.

        :Returns:
          A tuple ``(condition, result)``: the membership value associated
          to the condition and the rule's parametric response. If the
          condition is zero, ``(0.0, 0.0)`` is returned.
        '''
        if isinstance(r, int):
            r = self.__rules[r]
        mx, a = r
        # Membership value of each input; ``None`` conditions are skipped.
        cl = [ m(x) for m, x in zip(mx, xs) if m is not None ]
        # Fold the ``and`` over the conditions; explicit loop instead of
        # ``reduce``, which is not a builtin on Python 3.
        mr = cl[0]
        for c in cl[1:]:
            mr = self.__AND__(mr, c)
        if mr > 0.0:
            # NOTE(review): the class docstring states the response as
            # ``y = a0 + a1*x1 + ... + an*xn`` but the computation is a
            # plain dot product of ``a`` and ``xs``; the constant term a0
            # only takes effect if the caller includes a leading 1 in
            # ``xs``. Kept as-is for backward compatibility -- confirm
            # against callers.
            return (mr, dot(a, xs))
        else:
            return (0.0, 0.0)

    def __call__(self, *xs):
        '''
        Apply the controller to the set of input variables.

        Given the values of the input variables, evaluates every decision
        rule and calculates the weighted average of the results, the
        weights being the firing strengths of the rules.

        :Parameters:
          xs
            A tuple, a list or an array with the values of the input
            variables.

        :Returns:
          The response of the controller. (If no rule fires at all, the
          weights sum to zero and the division below is invalid.)
        '''
        ys = array([ self.eval(r, xs) for r in self.__rules ])
        m = ys[:, 0]
        y = ys[:, 1]
        return sum(m*y) / sum(m)
# ``Sugeno`` is the name most commonly found in the literature for the
# parametric (Takagi-Sugeno) controller implemented above.
class Sugeno(Parametric):
    '''``Sugeno`` is an alias to ``Parametric``'''
    pass
################################################################################
# Test
if __name__ == "__main__":
pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/defuzzy.py
# Defuzzification methods
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements defuzzification methods for use with fuzzy controllers.
Defuzzification methods take a set of numerical values, their corresponding
fuzzy membership values and calculate a defuzzified value for them. They're
implemented as functions, not as classes. So, to implement your own, use the
directions below.
These methods are implemented as functions with the signature ``(mf, y)``, where
``mf`` is the fuzzy set, and ``y`` is an array of values. That is, ``mf`` is a
fuzzy set containing the membership values of each one in the ``y`` array, in
the respective order. Both arrays should have the same dimensions, or else the
methods won't work.
See the example::
>>> import numpy
>>> from peach import *
>>> y = numpy.linspace(0., 5., 100)
>>> m_y = Triangle(1., 2., 3.)
>>> Centroid(m_y(y), y)
2.0001030715316435
The methods defined here are the most commonly used.
"""
################################################################################
import numpy
import types
from base import *
################################################################################
# Defuzzification methods
################################################################################
def Centroid(mf, y):
    '''
    Center of gravity method.

    The center of gravity is calculated using the standard formula found in
    any calculus book. The integrals are calculated using the trapezoid
    method.

    :Parameters:
      mf
        Fuzzy set containing the membership values of the elements in the
        vector given in sequence
      y
        Array of domain values of the defuzzified variable.

    :Returns:
      The center of gravity of the fuzzy set.
    '''
    # ``numpy.trapz`` was renamed ``numpy.trapezoid`` and removed in
    # NumPy 2.0 -- use whichever this NumPy installation provides.
    trapz = getattr(numpy, 'trapezoid', None) or numpy.trapz
    return trapz(mf*y, y) / trapz(mf, y)
def Bisector(mf, y):
    '''
    Bisection method

    The bisection method finds a coordinate ``y`` in the domain that
    divides the fuzzy set in two subsets with the same area. Integrals are
    calculated using the trapezoid method. This method only works if the
    values in ``y`` are equally spaced, otherwise the method will fail.

    :Parameters:
      mf
        Fuzzy set containing the membership values of the elements in the
        vector given in sequence
      y
        Array of domain values of the defuzzified variable.

    :Returns:
      Defuzzified value by the bisection method.
    '''
    # ``numpy.trapz`` was renamed ``numpy.trapezoid`` and removed in
    # NumPy 2.0 -- use whichever this NumPy installation provides.
    trapz = getattr(numpy, 'trapezoid', None) or numpy.trapz
    half_area = trapz(mf, y) / 2.0
    dy = y[1] - y[0]          # assumes equally spaced samples (see above)
    b = 0
    acc = 0.0
    # Accumulate trapezoid slices until half of the total area is covered.
    # The bound on ``b`` guards against stepping past the end of the array
    # when rounding keeps ``acc`` just below ``half_area``.
    while acc < half_area and b < len(y) - 1:
        b = b + 1
        acc = acc + 0.5 * (mf[b] + mf[b-1]) * dy
    return y[b]
def SmallestOfMaxima(mf, y):
    '''
    Smallest of maxima method.

    Finds the points of the domain in which the membership value is
    maximum, and returns the smallest of them.

    :Parameters:
      mf
        Fuzzy set containing the membership values of the elements in the
        vector given in sequence
      y
        Array of domain values of the defuzzified variable.

    :Returns:
      Defuzzified value by the smallest of maxima method.
    '''
    # ``argmax`` returns the *first* index of the maximum, which maps to
    # the smallest domain value since ``y`` is scanned in order.
    first_max = numpy.argmax(mf)
    return y[first_max]
def LargestOfMaxima(mf, y):
    '''
    Largest of maxima method.

    Finds the points of the domain in which the membership value is
    maximum, and returns the largest of them.

    :Parameters:
      mf
        Fuzzy set containing the membership values of the elements in the
        vector given in sequence
      y
        Array of domain values of the defuzzified variable.

    :Returns:
      Defuzzified value by the largest of maxima method.
    '''
    # Scan backwards: the first maximum of the reversed set is the last --
    # thus the largest -- maximum of the original set.
    reversed_mf = mf[::-1]
    reversed_y = y[::-1]
    return reversed_y[numpy.argmax(reversed_mf)]
def MeanOfMaxima(mf, y):
    '''
    Mean of maxima method.

    Finds the smallest and the largest of the maxima of the fuzzy set and
    returns their average.

    :Parameters:
      mf
        Fuzzy set containing the membership values of the elements in the
        vector given in sequence
      y
        Array of domain values of the defuzzified variable.

    :Returns:
      Defuzzified value by the mean of maxima method.
    '''
    smallest = y[numpy.argmax(mf)]            # first maximum, scanning up
    largest = y[::-1][numpy.argmax(mf[::-1])] # first maximum, scanning down
    return 0.5 * (smallest + largest)
################################################################################
# Test
if __name__ == "__main__":
pass | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/norms.py
# Norms, conorms and negations
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements operations of fuzzy logic.
Basic operations are ``and (&)``, ``or (|)`` and ``not (~)``. Those are
implemented as functions of, respectively, two, two and one values. The ``and``
is the t-norm of the fuzzy logic, and it is a function that takes two values and
returns the result of the ``and`` operation. The ``or`` is a function that takes
two values and returns the result of the ``or`` operation. the ``not`` is a
function that takes one value and returns the result of the ``not`` operation.
To implement your own operations there is no need to subclass -- just create the
functions and use them where appropriate.
Also, implication and aglutination functions are defined here. Implication is
the result of the generalized modus ponens used in fuzzy inference systems.
Aglutination is the generalization from two different conclusions used in fuzzy
inference systems. Both are implemented as functions that take two values and
return the result of the operation. As above, to implement your own operations,
there is no need to subclass -- just create the functions and use them where
appropriate.
The functions here are provided as convenience.
"""
################################################################################
import numpy
################################################################################
# Lofti Zadeh's basic operations
################################################################################
def ZadehAnd(x, y):
    '''
    And operation as defined by Lofti Zadeh.

    The ``and`` of two membership values is the smallest of them.

    :Returns:
      The result of the and operation.
    '''
    smallest = numpy.minimum(x, y)
    return smallest
def ZadehOr(x, y):
    '''
    Or operation as defined by Lofti Zadeh.

    The ``or`` of two membership values is the largest of them.

    :Returns:
      The result of the or operation.
    '''
    largest = numpy.maximum(x, y)
    return largest
def ZadehNot(x):
    '''
    Not operation as defined by Lofti Zadeh.

    The ``not`` of a membership value is its complement to 1, that is,
    ``1 - x``.

    :Returns:
      The result of the not operation.
    '''
    return 1 - x
def ZadehImplication(x, y):
    '''
    Implication operation as defined by Zadeh.

    Computes ``max(min(x, y), 1 - x)``.

    :Returns:
      The result of the implication.
    '''
    conjunction = numpy.minimum(x, y)
    return numpy.maximum(conjunction, 1. - x)
# Ready-made (and, or, not) bundle using Zadeh's operations.
ZADEH_NORMS = (ZadehAnd, ZadehOr, ZadehNot)
'Tuple containing, in order, Zadeh and, or and not operations'
################################################################################
# Drastic product and sum
################################################################################
def DrasticProduct(x, y):
    '''
    Drastic product, usable as a fuzzy ``and`` operation.

    Returns ``y`` where ``x == 1``, ``x`` where ``y == 1``, and zero
    everywhere else.

    :Returns:
      The result of the and operation
    '''
    conditions = [ x == 1., y == 1. ]
    choices = [ y, x ]
    return numpy.select(conditions, choices, 0.)
def DrasticSum(x, y):
    '''
    Drastic sum, usable as a fuzzy ``or`` operation.

    Returns ``y`` where ``x == 0``, ``x`` where ``y == 0``, and one
    everywhere else.

    :Returns:
      The result of the or operation
    '''
    conditions = [ x == 0., y == 0. ]
    choices = [ y, x ]
    return numpy.select(conditions, choices, 1.)
# Ready-made (and, or, not) bundle using the drastic operations; the
# negation is Zadeh's, as the drastic family defines no negation of its own.
DRASTIC_NORMS = (DrasticProduct, DrasticSum, ZadehNot)
'''Tuple containing, in order, Drastic product (and), Drastic sum (or) and Zadeh
not operations'''
################################################################################
# Einstein product and sum
################################################################################
def EinsteinProduct(x, y):
    '''
    Einstein product, usable as a fuzzy ``and`` operation.

    Computes ``x*y / (2 - (x + y - x*y))``.

    :Returns:
      The result of the and operation.
    '''
    numerator = x*y
    denominator = 2. - (x + y - x*y)
    return numerator / denominator
def EinsteinSum(x, y):
    '''
    Einstein sum, usable as a fuzzy ``or`` operation.

    Computes ``(x + y) / (1 + x*y)``.

    :Returns:
      The result of the or operation.
    '''
    numerator = x + y
    denominator = 1. + x*y
    return numerator / denominator
# Ready-made (and, or, not) bundle using the Einstein operations; the
# negation is Zadeh's, as the Einstein family defines no negation of its own.
EINSTEIN_NORMS = (EinsteinProduct, EinsteinSum, ZadehNot)
'''Tuple containing, in order, Einstein product (and), Einstein sum (or) and
Zadeh not operations'''
################################################################################
# Mamdani's basic operations
################################################################################
def MamdaniImplication(x, y):
    '''
    Implication operation as defined by Mamdani.

    The implication is the smallest of the two values.

    :Returns:
      The result of the implication.
    '''
    smallest = numpy.minimum(x, y)
    return smallest
def MamdaniAglutination(x, y):
    '''
    Aglutination as defined by Mamdani.

    The aglutination is the largest of the two values.

    :Returns:
      The result of the aglutination.
    '''
    largest = numpy.maximum(x, y)
    return largest
# Convenience (implication, aglutination) pair for Mamdani-type inference.
MAMDANI_INFERENCE = (MamdaniImplication, MamdaniAglutination)
'Tuple containing, in order, Mamdani implication and algutination'
################################################################################
# Probabilistic operations
################################################################################
def ProbabilisticAnd(x, y):
    '''
    And operation as a probabilistic operation.

    The ``and`` is the product of the two values, as for the probability of
    the intersection of independent events.

    :Returns:
      The result of the and operation.
    '''
    return x * y
def ProbabilisticOr(x, y):
    '''
    Or operation as a probabilistic operation.

    The ``or`` is given as the probability of the union of two independent
    events, that is, ``x + y - x*y``.

    :Returns:
      The result of the or operation.
    '''
    overlap = x * y
    return x + y - overlap
def ProbabilisticNot(x):
    '''
    Not operation as a probabilistic operation.

    The ``not`` is the complement to 1 of the given value, that is,
    ``1 - x``.

    :Returns:
      The result of the not operation.
    '''
    return 1 - x
def ProbabilisticImplication(x, y):
    '''
    Implication as a probabilistic operation.

    The implication is the product of the two values.

    :Returns:
      The result of the implication.
    '''
    return x * y
def ProbabilisticAglutination(x, y):
    '''
    Aglutination as a probabilistic operation.

    The aglutination is given as the probability of the union of two
    independent events, that is, ``x + y - x*y``.

    :Returns:
      The result of the aglutination.
    '''
    overlap = x * y
    return x + y - overlap
# Convenience bundles for probabilistic fuzzy systems: a (and, or, not)
# norm triple and an (implication, aglutination) inference pair.
PROB_NORMS = (ProbabilisticAnd, ProbabilisticOr, ProbabilisticNot)
'Tuple containing, in order, probabilistic and, or and not operations'
PROB_INFERENCE = (ProbabilisticImplication, ProbabilisticAglutination)
'Tuple containing, in order, probabilistic implication and algutination'
################################################################################
# Other implications
################################################################################
def DienesRescherImplication(x, y):
    '''
    Natural implication as in the truth table, defined by Dienes-Rescher.

    Computes ``max(1 - x, y)``.

    :Returns:
      The result of the implication.
    '''
    negated_premise = 1. - x
    return numpy.maximum(negated_premise, y)
def LukasiewiczImplication(x, y):
    '''
    Implication of the Lukasiewicz three-valued logic.

    Computes ``min(1, 1 - x + y)``.

    :Returns:
      The result of the implication.
    '''
    bounded = 1. - x + y
    return numpy.minimum(1., bounded)
def GodelImplication(x, y):
    '''
    Implication as defined by Godel.

    Evaluates to ``1`` wherever ``x < y``, and to ``y`` everywhere else.

    :Returns:
      The result of the implication.
    '''
    fully_true = x < y
    return numpy.select([ fully_true ], [ 1. ], y)
################################################################################
# Test
if __name__ == "__main__":
pass
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/cmeans.py
# Fuzzy C-Means algorithm
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Fuzzy C-Means
Fuzzy C-Means is a clustering algorithm based on fuzzy logic.
This package implements the fuzzy c-means algorithm for clustering and
classification. This algorithm is very simple, yet very efficient. From a
training set and an initial condition which gives the membership values of each
example in the training set to the clusters, it converges very fastly to crisper
sets.
The initial conditions, ie, the starting membership, must follow some rules.
Please, refer to any bibliography about the subject to see why. Those rules are:
no example might have membership 1 in every class, and the sum of the membership
of every component must be equal to 1. This means that the initial condition is
a fuzzy partition of the universe.
"""
################################################################################
import numpy
from numpy import dot, array, sum, zeros, outer, any
################################################################################
# Fuzzy C-Means class
################################################################################
class FuzzyCMeans(object):
    '''
    Fuzzy C-Means convergence.

    Use this class to instantiate a fuzzy c-means object. The object must
    be given a training set and initial conditions. The training set is a
    list or an array of N-dimensional vectors; the initial conditions are a
    list of the initial membership values for every vector in the training
    set -- thus, the length of both lists must be the same. The number of
    columns in the initial conditions must be the same number of classes.
    That is, if you are, for example, classifying in ``C`` classes, then
    the initial conditions must have ``C`` columns.

    There are restrictions in the initial conditions: first, no column can
    be all zeros or all ones -- if that happened, then the class described
    by this column is unnecessary; second, the sum of the memberships of
    every example must be one -- that is, the sum of the membership in
    every column in each line must be one. This means that the initial
    condition is a perfect partition of ``C`` subsets.

    Notice, however, that *no checking* is done. If your algorithm seems
    to be behaving strangely, try to check these conditions.
    '''
    def __init__(self, training_set, initial_conditions, m=2.):
        '''
        Initializes the algorithm.

        :Parameters:
          training_set
            A list or array of vectors containing the data to be
            classified. Each of the vectors in this list *must* have the
            same dimension, or the algorithm won't behave correctly.
            Notice that each vector can be given as a tuple -- internally,
            everything is converted to arrays.
          initial_conditions
            A list or array of vectors containing the initial membership
            values associated to each example in the training set. Each
            column of this array contains the membership assigned to the
            corresponding class for that vector. Notice that each vector
            can be given as a tuple -- internally, everything is converted
            to arrays.
          m
            This is the aggregation value. The bigger it is, the smoother
            will be the classification. Please, consult the bibliography
            about the subject. ``m`` must be bigger than 1. Its default
            value is 2.
        '''
        self.__x = array(training_set)
        self.__mu = array(initial_conditions)
        self.m = m
        '''The fuzzyness coefficient. Must be bigger than 1, the closest it
        is to 1, the smoother the membership curves will be.'''
        # Derive the initial centers from the given memberships.
        self.__c = self.centers()

    def __getc(self):
        return self.__c
    def __setc(self, c):
        # Reshaping to the current shape raises if the new centers do not
        # have exactly the same dimensions as the existing ones.
        self.__c = array(c).reshape(self.__c.shape)
    c = property(__getc, __setc)
    '''A ``numpy`` array containing the centers of the classes in the
    algorithm. Each line represents a center, and the number of lines is
    the number of classes. This property is read and write, but care must
    be taken when setting new centers: if the dimensions are not exactly
    the same as given in the instantiation of the class (*ie*, *C* centers
    of dimension *N*), an exception will be raised.'''

    def __getmu(self):
        return self.__mu
    mu = property(__getmu, None)
    '''The membership values for every vector in the training set. This
    property is modified at each step of the execution of the algorithm.
    This property is not writable.'''

    def __getx(self):
        return self.__x
    x = property(__getx, None)
    '''The vectors in which the algorithm bases its convergence. This
    property is not writable.'''

    def centers(self):
        '''
        Given the present state of the algorithm, recalculates the centers,
        that is, the position of the vectors representing each of the
        classes. Notice that this method modifies the state of the
        algorithm if any change was made to any parameter. This method
        receives no arguments and will seldom be used externally. It can be
        useful if you want to step over the algorithm. *This method has a
        colateral effect!* If you use it, the ``c`` property (see above)
        will be modified.

        :Returns:
          A vector containing, in each line, the position of the centers of
          the algorithm.
        '''
        # Standard FCM center update: weighted mean of the examples, the
        # weights being the memberships raised to the fuzzyness coefficient.
        mm = self.__mu ** self.m
        c = dot(self.__x.T, mm) / sum(mm, axis=0)
        self.__c = c.T
        return self.__c

    def membership(self):
        '''
        Given the present state of the algorithm, recalculates the
        membership of each example on each class. That is, it modifies the
        initial conditions to represent an evolved state of the algorithm.
        Notice that this method modifies the state of the algorithm if any
        change was made to any parameter.

        :Returns:
          A vector containing, in each line, the membership of the
          corresponding example in each class.
        '''
        x = self.__x
        c = self.__c
        M, _ = x.shape
        C, _ = c.shape
        r = zeros((M, C))
        m1 = 1./(self.m-1.)
        for k in range(M):
            # Squared distance of example ``k`` to every center.
            den = sum((x[k] - c)**2., axis=1)
            if any(den == 0):
                # The example coincides with a center -- the update formula
                # would divide by zero, so leave the memberships unchanged.
                return self.__mu
            frac = outer(den, 1./den)**m1
            r[k, :] = 1. / sum(frac, axis=1)
        self.__mu = r
        return self.__mu

    def step(self):
        '''
        This method runs one step of the algorithm. It might be useful to
        track the changes in the parameters.

        :Returns:
          The norm of the change in the membership values of the examples.
          It can be used to track convergence and as an estimate of the
          error.
        '''
        old = self.__mu
        self.membership()
        self.centers()
        # BUG FIX: the original computed ``sum(mu - old)**2``, squaring the
        # *sum* of the differences -- positive and negative changes cancel
        # out, so the reported error could be zero while the memberships
        # still changed. The squared Euclidean norm sums the *squared*
        # differences instead.
        return sum((self.__mu - old)**2.)

    def __call__(self, emax=1.e-10, imax=20):
        '''
        The ``__call__`` interface is used to run the algorithm until
        convergence is found.

        :Parameters:
          emax
            Specifies the maximum error admitted in the execution of the
            algorithm. It defaults to 1.e-10. The error is tracked
            according to the norm returned by the ``step()`` method.
          imax
            Specifies the maximum number of iterations admitted in the
            execution of the algorithm. It defaults to 20.

        :Returns:
          An array containing, at each line, the vectors representing the
          centers of the clustered regions.
        '''
        error = 1.
        i = 0
        while error > emax and i < imax:
            error = self.step()
            i = i + 1
        return self.c
################################################################################
# Test.
if __name__ == "__main__":
pass | Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/__init__.py
# Makes the fuzzy directory a python package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements fuzzy logic. Consult:
base
Basic definitions, classes and operations in fuzzy logic;
mf
Membership functions;
defuzzy
Defuzzification methods;
control
Fuzzy controllers (FIS - Fuzzy Inference Systems), for Mamdani- and
Sugeno-type controllers and others;
cmeans
Fuzzy C-Means clustering algorithm;
"""
# __all__ = [ 'base', 'control', 'mf', 'defuzzy', 'cmeans' ]
################################################################################
# Imports sub-packages
from peach.fuzzy.base import *
from peach.fuzzy.control import *
from peach.fuzzy.mf import *
from peach.fuzzy.defuzzy import *
from peach.fuzzy.cmeans import *
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: fuzzy/mf.py
# Membership functions for fuzzy logic
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Membership functions
Membership functions are actually subclasses of a main class called Membership,
see below. Instantiate a class to generate a function, optional arguments can be
specified to configure the function as needed. For example, to create a triangle
function starting at 0, with peak in 3, and ending in 4, use::
mu = Triangle(0, 3, 4)
Please notice that the return value is a *function*. To use it, apply it as a
normal function. For example, the function above, applied to the value 1.5
should return 0.5::
>>> print mu(1.5)
0.5
"""
################################################################################
import numpy
from numpy import exp, cos, pi
import types
from base import *
################################################################################
# Membership functions
################################################################################
class Membership(object):
    '''
    Base class of all membership functions.

    This class is used as base of the implemented membership functions, and
    can also be used to transform a regular function into a membership
    function that can be used with the fuzzy logic package.

    To create a membership function from a regular function ``f``, use::

        mf = Membership(f)

    A function converted this way can be used with vectors and matrices and
    always returns a FuzzySet object. Notice that the value range is not
    verified to fit in the range [ 0, 1 ]. It is the responsibility of the
    programmer to warrant that.

    To subclass Membership, just use it as a base class. It is suggested
    that the ``__init__`` method of the derived class allows configuration,
    and the ``__call__`` method is used to apply the function over its
    arguments.
    '''
    def __init__(self, f):
        '''
        Builds a membership function from a regular function.

        :Parameters:
          f
            Function to be transformed into a membership function. It must
            be given, and it must be a ``FunctionType`` object, otherwise a
            ``ValueError`` is raised.
        '''
        if isinstance(f, types.FunctionType):
            # ``vectorize`` lets the scalar function map elementwise over
            # vectors and matrices.
            self.__f = numpy.vectorize(f)
        else:
            # Parenthesized ``raise``: the original ``raise ValueError,
            # '...'`` form is a syntax error on Python 3; this form is
            # valid on both Python 2 and 3.
            raise ValueError('invalid function')

    def __call__(self, x):
        '''
        Maps the function on a vector.

        :Parameters:
          x
            A value, vector or matrix over which the function is evaluated.

        :Returns:
          A ``FuzzySet`` object containing the evaluation of the function
          over each of the components of the input.
        '''
        return FuzzySet(self.__f(x))
################################################################################
class IncreasingRamp(Membership):
    '''
    Increasing ramp.

    Given two points, ``x0`` and ``x1``, with ``x0 < x1``, creates a
    function which returns:

      0, if ``x <= x0``;
      ``(x - x0) / (x1 - x0)``, if ``x0 < x <= x1``;
      1, if ``x > x1``.
    '''
    def __init__(self, x0, x1):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the ramp;
          x1
            End of the ramp.
        '''
        self.__x0 = float(x0)
        self.__x1 = float(x1)
        # Slope of the linear section, precomputed once.
        self.__a = 1.0 / (self.__x1 - self.__x0)

    def __call__(self, x):
        # Piecewise evaluation: 0 below x0, linear on [x0, x1), 1 above.
        conditions = [ x < self.__x0, x < self.__x1 ]
        pieces = [ 0.0, self.__a * (x - self.__x0) ]
        return FuzzySet(numpy.select(conditions, pieces, 1.0))
################################################################################
class DecreasingRamp(Membership):
    '''
    Decreasing ramp.

    Given two points, ``x0`` and ``x1``, with ``x0 < x1``, creates a
    function which returns:

      1, if ``x <= x0``;
      ``(x1 - x) / (x1 - x0)``, if ``x0 < x <= x1``;
      0, if ``x > x1``.
    '''
    def __init__(self, x0, x1):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the ramp;
          x1
            End of the ramp.
        '''
        self.__x0 = float(x0)
        self.__x1 = float(x1)
        # Slope of the linear section, precomputed once.
        self.__a = 1.0 / (self.__x1 - self.__x0)

    def __call__(self, x):
        # Piecewise evaluation: 1 below x0, linear on [x0, x1), 0 above.
        conditions = [ x < self.__x0, x < self.__x1 ]
        pieces = [ 1.0, self.__a * (self.__x1 - x) ]
        return FuzzySet(numpy.select(conditions, pieces, 0.0))
################################################################################
class Triangle(Membership):
    '''
    Triangle function.

    Given three points, ``x0``, ``x1`` and ``x2``, with ``x0 < x1 < x2``,
    creates a function which returns:

      0, if ``x <= x0`` or ``x > x2``;
      ``(x - x0) / (x1 - x0)``, if ``x0 < x <= x1``;
      ``(x2 - x) / (x2 - x1)``, if ``x1 < x <= x2``.
    '''
    def __init__(self, x0, x1, x2):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the triangle;
          x1
            Peak of the triangle;
          x2
            End of triangle.
        '''
        self.__x0 = float(x0)
        self.__x1 = float(x1)
        self.__x2 = float(x2)
        # Slopes of the rising and falling edges, precomputed once.
        self.__a0 = 1.0 / (self.__x1 - self.__x0)
        self.__a1 = 1.0 / (self.__x2 - self.__x1)

    def __call__(self, x):
        # Rising edge on [x0, x1), falling edge on [x1, x2), 0 elsewhere.
        conditions = [ x < self.__x0, x < self.__x1, x < self.__x2 ]
        pieces = [ 0.0,
                   self.__a0 * (x - self.__x0),
                   self.__a1 * (self.__x2 - x) ]
        return FuzzySet(numpy.select(conditions, pieces, 0.0))
################################################################################
class Trapezoid(Membership):
    '''
    Trapezoid function.

    Given four points, ``x0``, ``x1``, ``x2`` and ``x3``, with
    ``x0 < x1 < x2 < x3``, creates a function which returns:

      0, if ``x <= x0`` or ``x > x3``;
      ``(x - x0)/(x1 - x0)``, if ``x0 <= x < x1``;
      1, if ``x1 <= x < x2``;
      ``(x3 - x)/(x3 - x2)``, if ``x2 <= x < x3``.
    '''
    def __init__(self, x0, x1, x2, x3):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the trapezoid;
          x1
            First peak of the trapezoid;
          x2
            Last peak of the trapezoid;
          x3
            End of trapezoid.
        '''
        self.__x0 = float(x0)
        self.__x1 = float(x1)
        self.__x2 = float(x2)
        self.__x3 = float(x3)
        # Slopes of the rising and falling edges, precomputed once.
        self.__a0 = 1.0 / (self.__x1 - self.__x0)
        self.__a1 = 1.0 / (self.__x3 - self.__x2)

    def __call__(self, x):
        # Rising edge, flat top, falling edge, 0 outside [x0, x3).
        conditions = [ x < self.__x0, x < self.__x1,
                       x < self.__x2, x < self.__x3 ]
        pieces = [ 0.0,
                   self.__a0 * (x - self.__x0),
                   1.0,
                   self.__a1 * (self.__x3 - x) ]
        return FuzzySet(numpy.select(conditions, pieces, 0.0))
################################################################################
class Gaussian(Membership):
    '''
    Gaussian function.

    Given the center ``x0`` and the width ``a``, creates a gaussian bell
    centered at ``x0``, that is:

      ``exp(-(x - x0)**2 / a)``
    '''
    def __init__(self, x0=0.0, a=1.0):
        '''
        Initializes the function.

        :Parameters:
          x0
            Center of the gaussian. Default value ``0.0``;
          a
            Width of the gaussian. Default value ``1.0``.
        '''
        self.__x0 = float(x0)
        # Store the reciprocal so evaluation multiplies instead of divides.
        self.__a = 1./float(a)

    def __call__(self, x):
        deviation = x - self.__x0
        return FuzzySet(exp(- self.__a * deviation**2))
################################################################################
class IncreasingSigmoid(Membership):
    '''
    Increasing Sigmoid function.

    Given the center ``x0`` and the slope ``a``, creates the increasing
    sigmoid ``1 / (1 + exp(-a*(x - x0)))``. It approaches ``0`` as ``x``
    goes to minus infinity, and ``1`` as ``x`` goes to infinity.
    '''
    def __init__(self, x0=0.0, a=1.0):
        '''
        Initializes the function.

        :Parameters:
          x0
            Center of the sigmoid. Default value ``0.0``. The function
            evaluates to ``0.5`` if ``x = x0``;
          a
            Slope of the sigmoid. Default value ``1.0``.
        '''
        self.__x0 = float(x0)
        self.__a = float(a)

    def __call__(self, x):
        argument = self.__a * (x - self.__x0)
        return FuzzySet(1.0 / (1.0 + exp(-argument)))
################################################################################
class DecreasingSigmoid(Membership):
    '''
    Decreasing Sigmoid membership function.

    Given a center ``x0`` and a slope ``a``, creates a decreasing sigmoid:
    it approaches 1 as ``x`` goes to minus infinity and 0 as ``x`` goes to
    infinity, computed as:

      ``1 / (1 + exp(a*(x - x0)))``
    '''
    def __init__(self, x0=0.0, a=1.0):
        '''
        Initializes the function.

        :Parameters:
          x0
            Center of the sigmoid; the function evaluates to ``0.5`` at
            ``x = x0``. Default value ``0.0``;
          a
            Slope of the sigmoid. Default value ``1.0``.
        '''
        self.__center = float(x0)
        self.__slope = float(a)
    def __call__(self, x):
        '''Evaluates the membership of ``x`` and returns a ``FuzzySet``.'''
        z = exp(self.__slope*(x - self.__center))
        return FuzzySet(1.0 / (1.0 + z))
################################################################################
class RaisedCosine(Membership):
    '''
    Raised Cosine membership function.

    A single period of a raised cosine centered at ``xm`` with frequency
    ``w``: the function is ``0.5 + 0.5*cos(w*(x - xm))`` for
    ``xm - pi/w <= x < xm + pi/w`` and 0 everywhere else.
    '''
    def __init__(self, xm=0.0, w=1.0):
        '''
        Initializes the function.

        :Parameters:
          xm
            Center of the cosine; the function evaluates to ``1`` at
            ``x = xm``. Default value ``0.0``;
          w
            Frequency of the cosine. Default value ``1.0``.
        '''
        self.__center = float(xm)
        self.__freq = float(w)
        # Limits of the single cosine lobe; the function is zero outside.
        self.__lo = self.__center - pi / self.__freq
        self.__hi = self.__center + pi / self.__freq
    def __call__(self, x):
        '''Evaluates the membership of ``x`` and returns a ``FuzzySet``.'''
        lobe = 0.5*cos(self.__freq*(x - self.__center)) + 0.5
        y = numpy.select([ x < self.__lo, x < self.__hi ], [ 0.0, lobe ], 0.0)
        return FuzzySet(y)
################################################################################
class Bell(Membership):
    '''
    Generalized Bell membership function.

    A symmetric function peaking at its center and decreasing fast to ``0``
    outside a given interval, computed as:

      ``1 / (1 + ((x - x0)/a)**(2*b))``
    '''
    def __init__(self, x0=0.0, a=1.0, b=1.0):
        '''
        Initializes the function.

        :Parameters:
          x0
            Center of the bell; the function evaluates to ``1`` at
            ``x = x0``. Default value ``0.0``;
          a
            Size of the interval; the function evaluates to ``0.5`` when
            ``x - x0`` equals ``a`` or ``-a``. Default value ``1.0``;
          b
            Measure of *flatness* of the bell -- the bigger ``b``, the
            flatter the resulting function. Default value ``1.0``.
        '''
        self.__center = float(x0)
        self.__width = float(a)
        # The exponent is doubled so the power is always even (symmetric).
        self.__expo = 2 * float(b)
    def __call__(self, x):
        '''Evaluates the membership of ``x`` and returns a ``FuzzySet``.'''
        u = (x - self.__center)/self.__width
        return FuzzySet(1.0 / (1.0 + u**self.__expo))
################################################################################
class Smf(Membership):
    '''
    Increasing smooth (S-shaped) curve, clamped to 0 below a given range and
    to 1 above it.
    '''
    def __init__(self, x0, x1):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the curve; the function returns 0 for every value
            below this;
          x1
            End of the curve; the function returns 1 for every value above
            this.
        '''
        self.__start = x0
        self.__end = x1
        # Midpoint where the two parabolic halves of the curve join.
        self.__mid = (x0 + x1) / 2.
        self.__span = x1 - x0
    def __call__(self, x):
        '''Evaluates the membership of ``x`` and returns a ``FuzzySet``.'''
        ta = (x - self.__start) / self.__span
        tb = (x - self.__end) / self.__span
        conds = [ x < self.__start, x < self.__mid, x < self.__end ]
        vals = [ 0., 2.*ta*ta, 1. - 2.*tb*tb ]
        return FuzzySet(numpy.select(conds, vals, 1.))
################################################################################
class Zmf(Membership):
    '''
    Decreasing smooth (Z-shaped) curve, clamped to 1 below a given range and
    to 0 above it.
    '''
    def __init__(self, x0, x1):
        '''
        Initializes the function.

        :Parameters:
          x0
            Start of the curve; the function returns 1 for every value
            below this;
          x1
            End of the curve; the function returns 0 for every value above
            this.
        '''
        self.__start = x0
        self.__end = x1
        # Midpoint where the two parabolic halves of the curve join.
        self.__mid = (x0 + x1) / 2.
        self.__span = x1 - x0
    def __call__(self, x):
        '''Evaluates the membership of ``x`` and returns a ``FuzzySet``.'''
        ta = (x - self.__start) / self.__span
        tb = (x - self.__end) / self.__span
        conds = [ x < self.__start, x < self.__mid, x < self.__end ]
        vals = [ 1., 1. - 2.*ta*ta, 2.*tb*tb ]
        return FuzzySet(numpy.select(conds, vals, 0.))
################################################################################
# Auxiliary functions
################################################################################
def Saw(interval, n):
    '''
    Splits an ``interval`` into ``n`` triangle functions.

    Given an interval in any domain, this function creates ``n`` triangle
    functions of the same size, equally spaced in the interval. It is very
    useful to create membership functions for controllers. The command below
    creates 3 triangle functions equally spaced in the interval (0, 4)::

      mf1, mf2, mf3 = Saw((0, 4), 3)

    This is the same as::

      mf1 = Triangle(0, 1, 2)
      mf2 = Triangle(1, 2, 3)
      mf3 = Triangle(2, 3, 4)

    :Parameters:
      interval
        A tuple ``(start, end)`` with the start and the end of the interval;
      n
        The number of functions in which the interval must be split.

    :Returns:
      A list of triangle membership functions, in order.
    '''
    start, end = interval
    # Each triangle spans two slices of width dx and overlaps its neighbor
    # by one slice; n triangles therefore need n+1 slices.
    dx = float(end - start)/float(n+1)
    result = [ ]
    left = start
    for _ in range(n):
        result.append(Triangle(left, left + dx, left + 2*dx))
        left = left + dx
    return result
################################################################################
def FlatSaw(interval, n):
    '''
    Splits an ``interval`` into a decreasing ramp, ``n-2`` triangle functions
    and an increasing ramp.

    Given an interval in any domain, this function will create a decreasing
    ramp in the start of the interval, ``n-2`` triangle functions of the same
    size equally spaced in the interval, and an increasing ramp in the end of
    the interval. It is very useful to create membership functions for
    controllers.

    The interval is divided in ``n+1`` slices of width ``dx`` and the ramps
    and triangles are placed one slice away from each end. For example::

      mf1, mf2, mf3 = FlatSaw((0, 2), 3)

    is the same as::

      mf1 = DecreasingRamp(0.5, 1.0)
      mf2 = Triangle(0.5, 1.0, 1.5)
      mf3 = IncreasingRamp(1.0, 1.5)

    NOTE(review): the original docstring showed ``DecreasingRamp(0, 1)``,
    ``Triangle(0, 1, 2)`` and ``IncreasingRamp(1, 2)`` for this call, which
    is not what the code below computes (here ``dx = (xf-xo)/(n+1) = 0.5``)
    -- confirm which behavior was actually intended.

    :Parameters:
      interval
        A tuple containing the start and the end of the interval, in the format
        ``(start, end)``;
      n
        The number of functions in which the interval must be split.

    :Returns:
      A list of corresponding functions, in order.
    '''
    xo, xf = interval
    # Width of one slice; the n functions sit one slice inside each border.
    dx = float(xf - xo)/float(n+1)
    mf1 = DecreasingRamp(xo+dx, xo+2*dx)
    # Interior triangles: Saw() receives the interval shrunk by one slice at
    # each side, and n-2 functions.
    mfs = Saw((xo+dx, xf-dx), n-2)
    mf2 = IncreasingRamp(xf-2*dx, xf-dx)
    return [ mf1 ] + mfs + [ mf2 ]
################################################################################
# Test.
# Test.
if __name__ == "__main__":
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: sa/base.py
# Simulated Annealing
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements two versions of simulated annealing optimization. One
works with numeric data, and the other with a codified bit string. This last
method can be used in discrete optimization problems.
"""
################################################################################
from numpy import exp, abs, array, isnan, where
from numpy.random import uniform
from random import randrange
from bitarray import bitarray
import struct
import types
from neighbor import *
################################################################################
# Classes
################################################################################
class ContinuousSA(object):
    '''
    Simulated Annealing continuous optimization.

    This is a simulated annealing optimizer implemented to work with vectors
    of continuous variables (implemented as floating point numbers). In
    general, simulated annealing methods search for neighbors of one estimate,
    which makes a lot more sense in discrete problems. While in this class the
    method is implemented in a different way (to deal with continuous
    variables), the principle is pretty much the same -- the neighbor is found
    based on a gaussian neighborhood.

    A simulated annealing algorithm adapted to deal with continuous variables
    has an enhancement that can be used: a standard optimizer can be given
    and, in case the neighbor is not accepted, the estimate is updated in the
    downhill direction by one step of that optimizer.
    '''
    def __init__(self, f, x0, ranges=None, neighbor=GaussianNeighbor, optm=None,
                 T0=1000., rt=0.95, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.
          x0
            First estimate of the minimum. Estimates can be given in any
            format, but internally they are converted to a one-dimension
            vector by flattening the array.
          ranges
            Optional list of ranges for each variable of the objective
            function, given as tuples ``(x0, x1)`` with ``x0 < x1``. It can
            also be a list with a single tuple, in which case the same range
            is applied to every variable. Defaults to None (no range check).
          neighbor
            Neighbor function, used to compute the neighbor of the present
            estimate. It must be an instance of ``ContinuousNeighbor`` or of a
            subclass, a subclass itself (instantiated with its defaults), or a
            plain function (wrapped in a ``ContinuousNeighbor``). Defaults to
            ``GaussianNeighbor``.
          optm
            A standard optimizer such as gradient or Newton, already
            instantiated and configured, used when an estimate is rejected.
            Defaults to None, in which case no standard optimization is used.
          T0
            Initial temperature of the system. The temperature is, of course,
            an analogy. Defaults to 1000.
          rt
            Temperature decreasing rate. At each step the temperature is
            multiplied by this value, so it is necessary that ``0 < rt < 1``.
            Defaults to 0.95; smaller values make the temperature decay
            faster.
          emax
            Maximum allowed error (absolute). The algorithm stops as soon as
            the error is below this level.
          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations is executed, no matter the error.
        '''
        self.__f = f
        # Estimates are always stored as flattened one-dimensional vectors.
        self.__x = array(x0).ravel()
        self.__fx = f(self.__x)
        # Determine ranges of the variables.
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                # A single range is replicated for every variable.
                # NOTE(review): ``len(x0[0])`` only works when ``x0`` is given
                # as a nested sequence (eg. ``[[1., 2., 3.]]``); a flat ``x0``
                # raises here -- confirm the expected input format.
                ranges = array(ranges * len(x0[0]))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''
        # Verifies the validity of the neighbor method: a ContinuousNeighbor
        # subclass is instantiated with its defaults, a plain function is
        # wrapped, anything else is rejected.
        try:
            if issubclass(neighbor, ContinuousNeighbor):
                neighbor = neighbor()
        except TypeError:
            # ``neighbor`` is not a class at all -- handled below.
            pass
        if isinstance(neighbor, types.FunctionType):
            neighbor = ContinuousNeighbor(neighbor)
        if not isinstance(neighbor, ContinuousNeighbor):
            raise TypeError('not a valid neighbor function')
        else:
            self.__nb = neighbor
        self.__optm = optm
        self.__t = float(T0)
        self.__r = float(rt)
        self.__emax = float(emax)
        self.__imax = int(imax)
    def __get_x(self):
        return self.__x
    def __set_x(self, x0):
        # Assigning to ``x`` restarts the optimizer with default parameters.
        self.restart(x0)
    x = property(__get_x, __set_x)
    '''The estimate of the position of the minimum.'''
    def __get_fx(self):
        return self.__fx
    fx = property(__get_fx, None)
    '''The value of the objective function at the present estimate.'''
    def restart(self, x0, T0=1000., rt=0.95, h=0.5):
        '''
        Resets the optimizer, returning to its original state, and allowing to
        use a new first estimate. Restartings are essential to the working of
        simulated annealing algorithms, to allow them to leave local minima.

        :Parameters:
          x0
            New estimate of the minimum; internally converted to a
            one-dimension vector by flattening.
          T0
            Initial temperature of the system. Defaults to 1000.
          rt
            Temperature decreasing rate, ``0 < rt < 1``. Defaults to 0.95.
          h
            The initial step of the search. Defaults to 0.5.
        '''
        self.__x = array(x0).ravel()
        self.__fx = self.__f(self.__x)
        self.__t = float(T0)
        self.__r = float(rt)
        # NOTE(review): __h is stored here but never read anywhere else in
        # this class -- confirm whether it is still needed.
        self.__h = float(h)
    def step(self):
        '''
        One step of the search.

        In this method, a neighbor of the given estimate is chosen by the
        configured neighbor function. It is accepted as a new estimate if it
        performs better in the cost function *or*, with a probability that
        decreases with the temperature, even if it performs worse (Metropolis
        criterion). In case it is not accepted and a standard optimizer was
        supplied, one step of that optimizer is executed instead.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the updated
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        f = self.__f
        x = self.__x
        fx = self.__fx
        # Next estimate
        xn = self.__nb(x)
        delta = f(xn) - fx
        # Accept downhill moves always; uphill moves with probability
        # exp(-delta/t), which shrinks as the system cools down.
        if delta < 0 or exp(-delta/self.__t) > uniform():
            xr = xn
            er = abs(delta)
        elif self.__optm is not None:
            # Rejected: fall back to one step of the standard optimizer.
            self.__optm.restart(x0 = x)
            xr, er = self.__optm.step()
        else:
            # Rejected and no fallback optimizer: keep the present estimate.
            xr = x
            er = abs(delta)
        # Sanity check: clip the new estimate to the allowed ranges.
        if self.ranges is not None:
            r0 = self.ranges[:, 0]
            r1 = self.ranges[:, 1]
            xr = where(xr < r0, r0, xr)
            xr = where(xr > r1, r1, xr)
        # Update state: cool down and store the (possibly new) estimate.
        self.__t = self.__t * self.__r
        self.__x = xr
        self.__fx = f(xr)
        return (xr, er)
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method, so
        the object is called as a function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # The error starts at emax so the loop always runs at least once; it
        # stops when the error falls below half the allowed maximum.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        return self.__x, e
################################################################################
class BinarySA(object):
    '''
    Simulated Annealing binary optimization.

    This is a simulated annealing optimizer implemented to work with vectors
    of bits, which can represent floating point or integer numbers, characters
    or anything allowed by the ``struct`` module of the Python standard
    library. The neighborhood of an estimate is calculated by an appropriate
    method given in the class instantiation. Given the nature of this
    implementation, no alternate convergence can be used in the case of
    rejection of an estimate.
    '''
    def __init__(self, f, x0, ranges=[ ], fmt=None, neighbor=InvertBitsNeighbor,
                 T0=1000., rt=0.95, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.
          x0
            First estimate of the minimum, given according to ``fmt`` (see
            below). Internally, estimates are stored as a bit stream.
          ranges
            Ranges of values allowed for each component of the input vector.
            If given, ranges are checked and a new estimate is generated in
            case any of the components fall beyond the value. It can be a
            list with a single tuple ``(inferior, superior)``, used for every
            variable, or a list of such tuples, one per variable. An empty
            list (the default) or ``None`` disables range checking.
          fmt
            A ``struct``-module format string for the data, e.g. ``'fff'``
            for three floats (implemented through ``bitarray``). If ``None``
            (the default), estimates are passed to the objective function as
            raw bitarrays and the function must decode them itself. Integer
            formats are strongly recommended: random bit sequences may have
            no representation as floating point numbers, which can make the
            algorithm misbehave.
          neighbor
            Neighbor function, used to compute the neighbor of the present
            estimate. It must be an instance of ``BinaryNeighbor`` or of a
            subclass, a subclass itself (instantiated with its defaults), or
            a plain function (wrapped in a ``BinaryNeighbor``). Defaults to
            ``InvertBitsNeighbor``, which inverts some bits of the estimate.
          T0
            Initial temperature of the system. The temperature is, of course,
            an analogy. Defaults to 1000.
          rt
            Temperature decreasing rate. At each step the temperature is
            multiplied by this value, so it is necessary that ``0 < rt < 1``.
            Defaults to 0.95; smaller values make the temperature decay
            faster.
          emax
            Maximum allowed error (absolute). The algorithm stops as soon as
            the error is below this level.
          imax
            Maximum number of iterations; the algorithm stops as soon as this
            number of iterations is executed, no matter the error.
        '''
        self.__f = f
        self.format = fmt
        self.__set_x(x0)
        # Determine ranges. An empty sequence (the default) disables range
        # checking exactly as None does -- previously an empty array was
        # stored, which crashed ``step`` when sliced with ``r[:, 0]``.
        if ranges is None or len(ranges) == 0:
            self.ranges = None
        elif len(ranges) == 1:
            # A single range is replicated for every variable of the format.
            self.ranges = array(ranges * len(fmt))
        else:
            self.ranges = array(ranges)
        # Verifies the validity of the neighbor method: a BinaryNeighbor
        # subclass is instantiated with its defaults, a plain function is
        # wrapped, anything else is rejected.
        try:
            if issubclass(neighbor, BinaryNeighbor):
                neighbor = neighbor()
        except TypeError:
            # ``neighbor`` is not a class at all -- handled below.
            pass
        if isinstance(neighbor, types.FunctionType):
            neighbor = BinaryNeighbor(neighbor)
        if not isinstance(neighbor, BinaryNeighbor):
            raise TypeError('not a valid neighbor function')
        else:
            self.__nb = neighbor
        self.__t = float(T0)
        self.__r = float(rt)
        self.__xbest = self.__x[:]         # Holds the best estimate so far and
        self.__fbest = f(self.__get_x())   # its value on the objective function.
        self.__emax = float(emax)
        self.__imax = int(imax)
    def __encode(self, values):
        '''
        Given the format of the estimate, encode the values. Return the
        corresponding bitarray. The format used is the one defined in the
        class instantiation.

        :Parameters:
          values
            An array, list or tuple of values to be encoded.

        :Returns:
          The encoded bitarray (or a plain copy, when no format is set).
        '''
        if self.format is None:
            return values[:]
        else:
            x = bitarray()
            x.fromstring(struct.pack(self.format, *values))
            return x[:]
    def __decode(self, bits):
        '''
        Given the format of the estimate, decode the bitarray. Return the
        corresponding values in the form of a tuple. The format used is the
        one defined in the class instantiation.

        :Parameters:
          bits
            Bitarray containing the bits to be decoded. It must be compatible
            with the informed format.

        :Returns:
          A tuple with the decoded values (or a plain copy, when no format is
          set).
        '''
        if self.format is None:
            return bits[:]
        return struct.unpack(self.format, bits.tostring())
    def __set_x(self, values):
        '''
        Setter for the estimate. The estimate must be given according to the
        format given as parameter in the instantiation of the class, and is
        converted to a bitarray before being stored. If no format was
        informed, the estimate must be a bitarray and is stored as such.

        :Parameters:
          values
            New estimate.
        '''
        self.__x = self.__encode(values)
    def __get_x(self):
        '''
        Getter for the estimate, decoded as the supplied format. If no format
        was supplied, the estimate is returned as a bitarray.

        :Returns:
          The estimate, decoded as the format.
        '''
        return self.__decode(self.__x)
    x = property(__get_x, __set_x)
    '''The estimate of the minimum'''
    def __get_best(self):
        '''
        Getter for the best value so far.

        :Returns:
          A tuple ``(x, fx)``, where ``x`` is the best estimate so far, and
          ``fx`` is its value on the objective function.
        '''
        return (self.__decode(self.__xbest), self.__fbest)
    best = property(__get_best, None)
    '''A tuple ``(x, fx)``, where ``x`` is the best estimate so far, and
    ``fx`` is its value on the objective function.'''
    def restart(self, x0, ranges=None, T0=1000., rt=0.95, h=0.5):
        '''
        Resets the optimizer, returning to its original state, and allowing to
        use a new first estimate. Restartings are essential to the working of
        simulated annealing algorithms, to allow them to leave local minima.

        :Parameters:
          x0
            New estimate of the minimum, given according to the format
            informed in the class instantiation.
          ranges
            New ranges of allowed values, in the same format accepted by the
            class instantiation. If None (the default), the previous ranges
            are kept.
          T0
            Initial temperature of the system. Defaults to 1000.
          rt
            Temperature decreasing rate, ``0 < rt < 1``. Defaults to 0.95.
        '''
        self.__set_x(x0)
        if ranges is not None:
            if len(ranges) == 0:
                self.ranges = None
            elif len(ranges) == 1:
                # BUG FIX: this line used the undefined name ``fmt`` (a
                # NameError at runtime); the format is kept in ``self.format``.
                self.ranges = array(ranges * len(self.format))
            else:
                self.ranges = array(ranges)
        self.__t = float(T0)
        self.__r = float(rt)
    def step(self):
        '''
        One step of the search.

        In this method, a neighbor of the given estimate is obtained from the
        present estimate by the neighbor function. It is accepted as a new
        estimate if it performs better in the cost function *or*, with a
        probability that decreases with the temperature, even if it performs
        worse (Metropolis criterion). In case it is not accepted, the
        previous estimate is maintained.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum so far, and ``e`` is the estimated error.
        '''
        # Keep track of the best result so far.
        f = self.__f
        fx = f(self.__get_x())
        if self.__fbest > fx:
            self.__xbest = self.__x[:]
            self.__fbest = fx
        # Perform computation of neighbor by changing a number of bits in the
        # bitarray representation of the estimate.
        xn = self.__nb(self.__x)
        # Performs a sanity check in the values: if any component falls
        # outside its range or decodes to NaN, a fresh estimate is drawn
        # uniformly inside the allowed ranges.
        xs = self.__decode(xn)
        r = self.ranges
        if r is not None:
            x0 = r[:, 0]
            x1 = r[:, 1]
            if any(xs < x0) or any(xs > x1) or any(isnan(xs)):
                xs = [ uniform(r0, r1) for r0, r1 in r ]
        # Update step, using temperature to decide if the new estimate is kept.
        delta = f(xs) - fx
        if delta < 0 or exp(-delta/self.__t) > uniform():
            self.__set_x(xs)
        # Cool down.
        self.__t = self.__t * self.__r
        return (self.__decode(self.__xbest), abs(delta))
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method, so
        the object is called as a function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # The error starts at emax so the loop always runs at least once; it
        # stops when the error falls below half the allowed maximum.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        return self.__decode(self.__xbest), e
################################################################################
# Test
# Test
if __name__ == "__main__":
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: sa/neighbor.py
# Simulated Annealing
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This module implements a general class to compute neighbors for continuous and
binary simulated annealing algorithms. The continuous neighbor functions return
an array with a neighbor of a given estimate; the binary neighbor functions
return a ``bitarray`` object.
"""
################################################################################
from numpy import array, reshape, vectorize
from numpy.random import uniform, standard_normal
from random import randrange
import types
################################################################################
# Classes
################################################################################
class ContinuousNeighbor(object):
    '''
    Base class for continuous neighbor functions.

    Derive this class to implement a function which computes the neighbor of
    a given estimate. Every neighbor function should implement at least two
    methods:

      __init__(self, *cnf, **kw)
        Initializes the object. There are no mandatory arguments, but any
        parameters can be used here to configure the operator (for instance,
        a variance used to randomly choose the neighbor). Offer default
        values whenever possible.

      __call__(self, x):
        Receives an estimate in ``x`` and returns the new estimate, using
        whatever parameters were given at instantiation.

    The SA implementations rely on this behaviour: they pass an estimate to
    the ``__call__`` method and expect to receive the result back.

    This class can also be used directly to turn a simple function into a
    neighbor function; in that case the wrapped function must compute the new
    estimate in an appropriate way.
    '''
    def __init__(self, f):
        '''
        Creates a neighbor function from a plain function.

        :Parameters:
          f
            The function to be wrapped. It must receive an array of any size
            and shape as an estimate, and return an estimate of the same size
            and shape. A function operating on a single number is also
            accepted -- it is vectorized so that it propagates over all
            components of the estimate.
        '''
        # Plain Python functions are vectorized; anything else (eg. builtins
        # or callables) is stored untouched.
        self.__func = vectorize(f) if isinstance(f, types.FunctionType) else f
    def __call__(self, x):
        '''
        Computes the neighbor of the given estimate.

        :Parameters:
          x
            The estimate to which the neighbor must be computed.
        '''
        return self.__func(x)
################################################################################
class GaussianNeighbor(ContinuousNeighbor):
    '''
    A new estimate based on a gaussian distribution.

    Computes the neighbor of an estimate by adding to it a gaussian
    distributed random vector with the same shape and size of the estimate.
    '''
    def __init__(self, variance=0.05):
        '''
        Initializes the neighbor operator.

        :Parameters:
          variance
            Scale of the gaussian perturbation. Either a single value, used
            for every component of the estimate, or an array with one value
            per component.
        '''
        # Public attribute: variance of the gaussian distribution; it can be
        # tuned between steps.
        self.variance = variance
    def __call__(self, x):
        '''
        Computes the neighbor of the given estimate.

        :Parameters:
          x
            The estimate to which the neighbor must be computed.
        '''
        original_shape = x.shape
        flat = array(x).ravel()
        # Perturb every component, then restore the original shape.
        noisy = flat + self.variance*standard_normal(flat.shape)
        return reshape(noisy, original_shape)
################################################################################
class UniformNeighbor(ContinuousNeighbor):
    '''
    A new estimate based on a uniform distribution.

    Computes the neighbor of an estimate by adding to it a uniformly
    distributed random vector with the same shape and size of the estimate.
    '''
    def __init__(self, xl=-1.0, xh=1.0):
        '''
        Initializes the neighbor operator.

        :Parameters:
          xl
            The lower limit of the distribution;
          xh
            The upper limit of the distribution. Both values can be a single
            value, used for every component of the estimate, or an array with
            one value per component.
        '''
        # Public attributes: limits of the uniform distribution; they can be
        # tuned between steps.
        self.xl = xl
        self.xh = xh
    def __call__(self, x):
        '''
        Computes the neighbor of the given estimate.

        :Parameters:
          x
            The estimate to which the neighbor must be computed.
        '''
        original_shape = x.shape
        flat = array(x).ravel()
        # Perturb every component, then restore the original shape.
        noisy = flat + uniform(self.xl, self.xh, len(flat))
        return reshape(noisy, original_shape)
################################################################################
class BinaryNeighbor(object):
    '''
    Base class for binary neighbor functions.

    Derive this class to implement a function which computes the neighbor of
    a given estimate. Every neighbor function should implement at least two
    methods:

      __init__(self, *cnf, **kw)
        Initializes the object. There are no mandatory arguments, but any
        parameters can be used here to configure the operator (for instance,
        a bit change rate). Offer default values whenever possible.

      __call__(self, x):
        Receives an estimate in ``x`` and returns the new estimate, using
        whatever parameters were given at instantiation.

    The SA implementations rely on this behaviour: they pass an estimate to
    the ``__call__`` method and expect to receive the result back. Note,
    however, that the SA implementation does not expect the result to be
    sane, ie, in conformity with the representation used in the algorithm --
    a sanity check is done inside the binary SA class. Please, consult the
    documentation on ``BinarySA`` for further details.

    This class can also be used directly to turn a simple function into a
    neighbor function; in that case the wrapped function must compute the new
    estimate in an appropriate way.
    '''
    def __init__(self, f):
        '''
        Creates a neighbor function from a plain function.

        :Parameters:
          f
            The function to be wrapped. It must receive a bitarray of any
            length as an estimate, and return a new bitarray of the same
            length as a result.
        '''
        self.__func = f
    def __call__(self, x):
        '''
        Computes the neighbor of the given estimate.

        :Parameters:
          x
            The estimate to which the neighbor must be computed.
        '''
        return self.__func(x)
################################################################################
class InvertBitsNeighbor(BinaryNeighbor):
    '''
    A simple neighborhood based on the change of a few bits.

    The neighbor is computed by randomly choosing ``nb`` positions in the
    bitarray representing the estimate and inverting the bit at each one.
    '''
    def __init__(self, nb=2):
        '''
        Initializes the operator.

        :Parameters:
          nb
            The number of bits to be randomly chosen to be inverted in the
            calculation of the neighbor. Be very careful while choosing this
            parameter. While very large optimizations can benefit from a big
            value here, it is not recommended that more than one bit per
            variable is inverted at each step -- otherwise, the neighbor
            might fall very far from the present estimate, which can make
            the algorithm not work accordingly. Defaults to 2. Note that the
            positions are drawn independently, so the same bit can be drawn
            twice, in which case it is inverted back to its original value.
        '''
        self.__nb = nb
    def __call__(self, x):
        '''
        Computes the neighbor of the given estimate.

        :Parameters:
          x
            The estimate to which the neighbor must be computed.
        '''
        # Work on a copy so the original estimate is left untouched.
        xn = x[:]
        # range (not xrange) keeps identical behaviour on Python 2 while
        # remaining valid on Python 3.
        for _ in range(self.__nb):
            index = randrange(len(xn))
            xn[index] = 1 - xn[index]
        return xn
################################################################################
# This module is a library; there is no demonstration code to run when it
# is executed directly.
if __name__ == "__main__":
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: sa/__init__.py
# Makes the sa directory a package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements optimization by simulated annealing. Consult:
base
Implementation of the basic simulated annealing algorithms;
neighbor
Some methods for determining the neighbor of the present estimate;
Simulated Annealing is a meta-heuristic designed for optimization of functions.
It tries to mimic the way that atoms settle in crystal structures of metals. By
slowly cooling the metal, atoms settle in a position of low energy -- thus, it
is a natural optimization method.
Two kinds of optimizer are implemented here. The continuous version of the
algorithm can be used for optimization of continuous objective functions; the
discrete (or binary) one, can be used in combinatorial optimization problems.
"""
# __all__ = [ 'base', 'neighbor' ]
################################################################################
# Imports sub-packages
from peach.sa.base import * # Basic definitions
from peach.sa.neighbor import * # Computation of the neighbor
| Python |
# -*- coding: utf-8 -*-
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: __init__.py
# Makes the peach directory a package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
*Peach* is a pure-Python package with aims to implement techniques of machine
learning and computational intelligence. It contains packages for
- Neural Networks, including, but not limited to, multi-layer perceptrons and
self-organizing maps;
- Fuzzy logic and fuzzy inference systems, including Mamdani-type and
Sugeno-type controllers;
- Optimization packages, including multidimensional optimization;
- Stochastic Optimizations, including genetic algorithms, simulated annealing,
particle swarm optimization;
- A lot more.
:Authors:
José Alexandre Nalon
"""
# Variables and information about the system
__version__ = "0.1.0"
# __all__ = [ 'nn', 'fuzzy', 'optm', 'ga', 'sa', 'pso' ]
################################################################################
# Imports sub-packages
from peach.nn import * # Neural network package
from peach.fuzzy import * # Fuzzy logic package
from peach.optm import * # Optimization package
from peach.ga import * # Genetic Algorithms package
from peach.sa import * # Simulated Annealing package
from peach.pso import * # Particle Swarm Optimization package
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/sa.py
# Simulated Annealing
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
General methods of stochastic optimization.
"""
################################################################################
# from numpy import
from optm import Optimizer
################################################################################
# Classes
################################################################################
class CrossEntropy(Optimizer):
    '''
    Multidimensional search based on cross-entropy technique.

    The cross-entropy method keeps a population of N candidate solutions,
    randomly generated at each iteration. To drive convergence, the M best
    candidates are selected and their statistics computed; the following
    population is then randomly generated from those statistics.
    '''
    def __init__(self, f, M=30, N=60, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should
            have only one parameter, a multidimensional line-vector, and
            return the function value, a scalar.
          M
            Size of the solution set used to calculate the statistics to
            generate the next set of solutions.
          N
            Total size of the solution set.
          emax
            Maximum allowed error. The algorithm stops as soon as the error
            is below this level. The error is absolute.
          imax
            Maximum number of iterations; the algorithm stops as soon as
            this number of iterations is executed, no matter what the error
            is at the moment.
        '''
        # Store the objective and the stop criteria; numeric parameters are
        # normalized to the expected types up front.
        self.__f = f
        self.__emax = float(emax)
        self.__imax = int(imax)
        # Population sizing: M best out of N candidates.
        self.__M = int(M)
        self.__N = int(N)
        # The candidate population starts empty.
        self.__solutions = []
    def step(self):
        '''
        One step of the search (*NOT IMPLEMENTED YET*)

        In this method, the solution set is searched for the M best
        solutions. Mean and variance of these solutions are calculated, and
        these values are used to randomly generate, from a gaussian
        distribution, a set of N new solutions.
        '''
        pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/optm.py
# Basic definitions and base class
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
Basic definitions and base class for optimizers
This sub-package exports some auxiliary functions to work with cost functions,
namely, a function to calculate gradient vectors and hessian matrices, which are
extremely important in optimization.
Also, a base class, ``Optimizer``, for all optimizers. Sub-class this class if
you want to create your own optmizer, and follow the interface. This will allow
easy configuration of your own scripts and comparison between methods.
"""
################################################################################
from numpy import array, zeros, isscalar
################################################################################
# Auxiliary functions
################################################################################
def gradient(f, dx=1e-5):
    '''
    Creates a function that computes the gradient vector of a scalar field.

    This function takes as a parameter a scalar function and creates a new
    function that is able to compute the derivative (in case of single
    variable functions) or the gradient vector (in case of multivariable
    functions). Please, note that this function takes as a parameter a
    *function*, and returns as a result *another function*. Calling the
    returned function on a point will give the gradient vector of the
    original function at that point::

      >>> def f(x):
      ...     return x**2
      >>> df = gradient(f)
      >>> df(1)
      2

    In the above example, ``df`` is a generated function which will return the
    result of the expression ``2*x``, the derivative of the original function.
    In the case ``f`` is a multivariable function, it is assumed that its
    argument is a line vector.

    :Parameters:
      f
        Any function, one- or multivariable. The function must be an scalar
        function, though there is no checking at the moment the function is
        created. If ``f`` is not an scalar function, an exception will be
        raised at the moment the returned function is used.
      dx
        Optional argument that gives the precision of the calculation. It is
        recommended that ``dx = sqrt(D)``, where ``D`` is the machine
        precision. It defaults to ``1e-5``, which usually gives a good
        estimate.

    :Returns:
      A new function which, upon calling, gives the derivative or gradient
      vector of the original function on the analised point. The parameter of
      the returned function is a real number or a line vector where the
      gradient should be calculated.
    '''
    def _df(x, dx=dx):
        if isscalar(x):
            # Single variable: central difference around x.
            x = float(x)
            return (f(x+dx) - f(x-dx)) / (2.*dx)
        else:
            # Multivariable: central difference independently on each
            # component. range (not xrange) keeps identical behaviour on
            # Python 2 while remaining valid on Python 3.
            n = x.size
            df = zeros((n, ))
            for i in range(n):
                xl = array(x)
                xl[i] = xl[i] - dx
                xr = array(x)
                xr[i] = xr[i] + dx
                df[i] = (f(xr) - f(xl)) / (2.*dx)
            return df
    return _df
def hessian(f, dx=1e-5):
    '''
    Creates a function that computes the hessian matrix of a scalar field.

    This function takes as a parameter a scalar function and creates a new
    function that is able to calculate the second derivative (in case of
    single variable functions) or the hessian matrix (in case of
    multivariable functions). Please, note that this function takes as a
    parameter a *function*, and returns as a result *another function*.
    Calling the returned function on a point will give the hessian matrix of
    the original function at that point::

      >>> def f(x):
      ...     return x**4
      >>> ddf = hessian(f)
      >>> ddf(1)
      12

    In the above example, ``ddf`` is a generated function which will return
    the result of the expression ``12*x**2``, the second derivative of the
    original function. In the case ``f`` is a multivariable function, it is
    assumed that its argument is a line vector.

    :Parameters:
      f
        Any function, one- or multivariable. The function must be an scalar
        function, though there is no checking at the moment the function is
        created. If ``f`` is not an scalar function, an exception will be
        raised at the moment the returned function is used.
      dx
        Optional argument that gives the precision of the calculation. It is
        recommended that ``dx = sqrt(D)``, where ``D`` is the machine
        precision. It defaults to ``1e-5``, which usually gives a good
        estimate.

    :Returns:
      A new function which, upon calling, gives the second derivative or
      hessian matrix of the original function on the analised point. The
      parameter of the returned function is a real number or a line vector
      where the hessian should be calculated.
    '''
    def _hf(x):
        if isscalar(x):
            # Single variable: standard central second difference with step
            # dx. (The denominator is dx**2, NOT 4*dx**2 -- the samples are
            # taken at x-dx and x+dx, so the effective step is dx. The old
            # 4*dx*dx denominator returned one fourth of the true second
            # derivative, contradicting the docstring example above and the
            # multivariable branch below.)
            x = float(x)
            return (f(x+dx) - 2.*f(x) + f(x-dx)) / (dx*dx)
        else:
            # Multivariable: mixed central difference for each pair (i, j).
            # For i == j the four sample points collapse to x-2dx, x (twice)
            # and x+2dx, so the 4*dx*dx denominator is correct here (the
            # effective step is 2*dx).
            n = x.size
            hf = zeros((n, n))
            for i in range(n):
                for j in range(n):
                    xll = array(x)
                    xll[i] = xll[i] - dx
                    xll[j] = xll[j] - dx
                    xul = array(x)
                    xul[i] = xul[i] - dx
                    xul[j] = xul[j] + dx
                    xlr = array(x)
                    xlr[i] = xlr[i] + dx
                    xlr[j] = xlr[j] - dx
                    xur = array(x)
                    xur[i] = xur[i] + dx
                    xur[j] = xur[j] + dx
                    hf[i, j] = (f(xur) - f(xlr) - f(xul) + f(xll)) / (4.*dx*dx)
            return hf
    return _hf
################################################################################
# Base classes
################################################################################
class Optimizer(object):
    '''
    Base class for all optimizers.

    This class does nothing by itself and shouldn't be instantiated; it only
    serves as a template (interface) for implemented optimizers. Subclass it
    to create your own optimizer, providing the methods below:

    __init__
      Initializes the optimizer. The usual signature is::

        __init__(self, f, x0, ..., emax=1e-8, imax=1000)

      where ``f`` is the cost function to be minimized; ``x0`` is the first
      estimate of the location of the minimum; ``...`` stands for any
      additional, technique-dependent configuration; ``emax`` is the maximum
      allowed error; and ``imax`` is the maximum number of iterations (the
      default values are only suggestions).

    step()
      Takes the current estimate and computes the next, possibly better,
      one. The result depends on the method, the optimizer state and its
      configuration, so two calls with the same estimate might not give the
      same result. The implementation keeps track of every needed parameter
      and returns a tuple ``(x, e)`` with the new estimate of the solution
      and the estimate of the error.

    restart()
      Restarts the optimizer -- for instance, to escape a local minimum or
      to try different estimates. It takes at least one argument, ``x0``, a
      new estimate; new configuration may optionally be given, otherwise the
      old one must be kept.

    __call__
      Iterates the optimizer until one of the stop criteria is met: error
      below the maximum, or more than the maximum number of iterations.
      Error is usually estimated from the previous estimate, but any
      technique may be used; a counter tracks the iterations. Returns a
      tuple ``(x, e)`` with the final estimate of the solution and the
      estimate of the error.
    '''
    def __init__(self, f=None, x0=None, emax=1e-8, imax=1000):
        '''Interface placeholder -- subclasses store the cost function, the
        first estimate and the stop criteria.'''
        pass
    def step(self, x):
        '''Interface placeholder -- subclasses compute and return the next
        estimate and the error as a tuple ``(x, e)``.'''
        pass
    def __call__(self, x):
        '''Interface placeholder -- subclasses iterate ``step`` until a stop
        criterion is met and return the final ``(x, e)`` tuple.'''
        pass
################################################################################
# Test
# No self-test is provided for this module yet.
if __name__ == "__main__":
    pass
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/quasinewton.py
# Quasi-newton multivariable search methods
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements basic quasi-Newton optimizers. Newton optimizer is very
efficient, except that inverse matrices need to be calculated at each
convergence step. These methods try to estimate the hessian inverse iteratively,
thus increasing performance.
"""
################################################################################
import numpy
from numpy import dot, sum, abs, eye, array, outer, where
from numpy.linalg import inv
from base import Optimizer, gradient, hessian
################################################################################
# Classes
################################################################################
class DFP(Optimizer):
    '''
    DFP (*Davidon-Fletcher-Powell*) search

    Quasi-Newton method that keeps a running estimate of the inverse hessian
    of the objective function, refined at every step with a rank-two
    correction, so no matrix inversion is needed during the search itself.
    '''
    def __init__(self, f, x0, ranges=None, df=None, h=0.1, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.
          x0
            First estimate of the minimum. Estimates can be given in any
            format, but internally they are converted to a one-dimension
            vector, where each component corresponds to the estimate of that
            particular variable. The vector is computed by flattening the
            array.
          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0``
            should be smaller than ``x1``. It can also be given as a list with
            a simple tuple in the same format. In that case, the same range
            will be applied for every variable in the optimization.
          df
            A function to calculate the gradient vector of the cost function
            ``f``. Defaults to ``None``, if no gradient is supplied, then it
            is estimated from the cost function using Euler equations.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.
          imax
            Maximum number of iterations, the algorithm stops as soon this
            number of iterations are executed, no matter what the error is at
            the moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        if df is None:
            # No analytic gradient given -- estimate it numerically.
            self.__df = gradient(f)
        else:
            self.__df = df
        # First estimate of the inverse hessian, computed numerically at x0.
        self.__B = inv(hessian(f)(self.__x))
        self.__h = h
        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                # A single range is replicated for every variable. The number
                # of variables is taken from the flattened estimate (the
                # previous ``len(x0[0])`` failed for flat first estimates
                # such as ``(0., 0.)``).
                ranges = array(ranges * len(self.__x))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''
        self.__emax = float(emax)
        self.__imax = int(imax)
    def __get_x(self):
        return self.__x
    def __set_x(self, x0):
        # Assigning to ``x`` restarts the search from the new estimate.
        self.restart(x0)
    x = property(__get_x, __set_x)
    '''The estimate of the position of the minimum.'''
    def restart(self, x0, h=None):
        '''
        Resets the optimizer, returning to its original state, and allowing to
        use a new first estimate.

        :Parameters:
          x0
            New estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimension vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
        '''
        self.__x = array(x0).ravel()
        # Re-estimate the inverse hessian at the new starting point.
        self.__B = inv(hessian(self.__f)(self.__x))
        if h is not None:
            self.__h = h
    def step(self):
        '''
        One step of the search.

        In this method, the result of the step is dependent of parameters
        calculated before (namely, the estimate of the inverse hessian), so it
        is not recomended that different investigations are used with the same
        optimizer in the same cost function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the updated
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        # Quasi-Newton update of the estimate: x(n+1) = x(n) - h*B*grad(f).
        x = self.__x
        B = self.__B
        dfx = self.__df(x)
        dx = - self.__h * dot(B, dfx)
        xn = x + dx
        # Sanity check: clip the new estimate to the allowed ranges.
        if self.ranges is not None:
            r0 = self.ranges[:, 0]
            r1 = self.ranges[:, 1]
            xn = where(xn < r0, r0, xn)
            xn = where(xn > r1, r1, xn)
        # DFP rank-two update of the inverse hessian estimate.
        y = self.__df(xn) - dfx
        By = dot(B, y)
        dB = outer(dx, dx) / dot(y, dx) - outer(By, By) / dot(y, By)
        self.__B = B + dB
        # Updates state; the error is the L1 norm of the displacement.
        self.__x = xn
        return xn, sum(abs(xn - x))
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method,
        so the object is called as a function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # The error starts at emax while the loop tests against emax/2, which
        # guarantees that at least one step is always performed.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        return self.__x, e
################################################################################
class BFGS(Optimizer):
    '''
    BFGS (*Broyden-Fletcher-Goldfarb-Shanno*) search

    Quasi-Newton method that keeps a running estimate of the inverse hessian
    of the objective function, refined at every step with the BFGS update,
    so no matrix inversion is needed during the search itself.
    '''
    def __init__(self, f, x0, ranges=None, df=None, h=0.1, emax=1e-5, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.
          x0
            First estimate of the minimum. Estimates can be given in any
            format, but internally they are converted to a one-dimension
            vector, where each component corresponds to the estimate of that
            particular variable. The vector is computed by flattening the
            array.
          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0``
            should be smaller than ``x1``. It can also be given as a list with
            a simple tuple in the same format. In that case, the same range
            will be applied for every variable in the optimization.
          df
            A function to calculate the gradient vector of the cost function
            ``f``. Defaults to ``None``, if no gradient is supplied, then it
            is estimated from the cost function using Euler equations.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.
          imax
            Maximum number of iterations, the algorithm stops as soon this
            number of iterations are executed, no matter what the error is at
            the moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        if df is None:
            # No analytic gradient given -- estimate it numerically.
            self.__df = gradient(f)
        else:
            self.__df = df
        # First estimate of the inverse hessian, computed numerically at x0.
        self.__B = inv(hessian(self.__f)(self.__x))
        self.__h = h
        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                # A single range is replicated for every variable. The number
                # of variables is taken from the flattened estimate (the
                # previous ``len(x0[0])`` failed for flat first estimates
                # such as ``(0., 0.)``).
                ranges = array(ranges * len(self.__x))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''
        self.__emax = float(emax)
        self.__imax = int(imax)
    def __get_x(self):
        return self.__x
    def __set_x(self, x0):
        # Assigning to ``x`` restarts the search from the new estimate.
        # (Added for consistency with the DFP and SR1 classes.)
        self.restart(x0)
    x = property(__get_x, __set_x)
    '''The estimate of the position of the minimum.'''
    def restart(self, x0, h=None):
        '''
        Resets the optimizer, returning to its original state, and allowing to
        use a new first estimate.

        :Parameters:
          x0
            New estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimension vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
        '''
        self.__x = array(x0).ravel()
        # Re-estimate the inverse hessian at the new starting point.
        self.__B = inv(hessian(self.__f)(self.__x))
        if h is not None:
            self.__h = h
    def step(self):
        '''
        One step of the search.

        In this method, the result of the step is dependent of parameters
        calculated before (namely, the estimate of the inverse hessian), so it
        is not recomended that different investigations are used with the same
        optimizer in the same cost function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the updated
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        # Quasi-Newton update of the estimate: x(n+1) = x(n) - h*B*grad(f).
        x = self.__x
        n = x.size
        B = self.__B
        dfx = self.__df(x)
        dx = - self.__h * dot(B, dfx)
        xn = x + dx
        # Sanity check: clip the new estimate to the allowed ranges.
        if self.ranges is not None:
            r0 = self.ranges[:, 0]
            r1 = self.ranges[:, 1]
            xn = where(xn < r0, r0, xn)
            xn = where(xn > r1, r1, xn)
        # BFGS update of the inverse hessian estimate.
        y = self.__df(xn) - dfx
        ytx = dot(y.T, dx)
        M = eye(n) - outer(y, dx.T) / ytx
        self.__B = dot(dot(M.T, B), M) + outer(dx, dx) / ytx
        # Updates state; the error is the L1 norm of the displacement.
        self.__x = xn
        return xn, sum(abs(xn - x))
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method,
        so the object is called as a function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # The error starts at emax while the loop tests against emax/2, which
        # guarantees that at least one step is always performed.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        return self.__x, e
################################################################################
class SR1(Optimizer):
    '''
    SR1 (*Symmetric Rank 1*) search method

    Quasi-Newton method that keeps a running estimate of the inverse hessian
    of the objective function, refined at every step with a symmetric
    rank-one correction, so no matrix inversion is needed during the search
    itself.
    '''
    def __init__(self, f, x0, ranges=None, df=None, h=0.1, emax=1e-5, imax=1000):
        '''
        Initializes the optimizer.

        To create an optimizer of this type, instantiate the class with the
        parameters given below:

        :Parameters:
          f
            A multivariable function to be optimized. The function should have
            only one parameter, a multidimensional line-vector, and return the
            function value, a scalar.
          x0
            First estimate of the minimum. Estimates can be given in any
            format, but internally they are converted to a one-dimension
            vector, where each component corresponds to the estimate of that
            particular variable. The vector is computed by flattening the
            array.
          ranges
            A range of values might be passed to the algorithm, but it is not
            necessary. If supplied, this parameter should be a list of ranges
            for each variable of the objective function. It is specified as a
            list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
            start of the interval, and ``x1`` its end. Obviously, ``x0``
            should be smaller than ``x1``. It can also be given as a list with
            a simple tuple in the same format. In that case, the same range
            will be applied for every variable in the optimization.
          df
            A function to calculate the gradient vector of the cost function
            ``f``. Defaults to ``None``, if no gradient is supplied, then it
            is estimated from the cost function using Euler equations.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
          emax
            Maximum allowed error. The algorithm stops as soon as the error is
            below this level. The error is absolute.
          imax
            Maximum number of iterations, the algorithm stops as soon this
            number of iterations are executed, no matter what the error is at
            the moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = array(x0).ravel()
        if df is None:
            # No analytic gradient given -- estimate it numerically.
            self.__df = gradient(f)
        else:
            self.__df = df
        # First estimate of the inverse hessian, computed numerically at x0.
        # (Accessed through the private attribute directly, instead of the
        # ``x`` property, for consistency with the other quasi-Newton
        # classes; the behaviour is identical.)
        self.__B = inv(hessian(self.__f)(self.__x))
        self.__h = h
        # Determine ranges of the variables
        if ranges is not None:
            ranges = list(ranges)
            if len(ranges) == 1:
                # A single range is replicated for every variable. The number
                # of variables is taken from the flattened estimate (the
                # previous ``len(x0[0])`` failed for flat first estimates
                # such as ``(0., 0.)``).
                ranges = array(ranges * len(self.__x))
            else:
                ranges = array(ranges)
        self.ranges = ranges
        '''Holds the ranges for every variable. Although it is a writable
        property, care should be taken in changing parameters before ending the
        convergence.'''
        self.__emax = float(emax)
        self.__imax = int(imax)
    def __get_x(self):
        return self.__x
    def __set_x(self, x0):
        # Assigning to ``x`` restarts the search from the new estimate.
        self.restart(x0)
    x = property(__get_x, __set_x)
    '''The estimate of the position of the minimum.'''
    def restart(self, x0, h=None):
        '''
        Resets the optimizer, returning to its original state, and allowing to
        use a new first estimate.

        :Parameters:
          x0
            New estimate of the minimum. Estimates can be given in any format,
            but internally they are converted to a one-dimension vector, where
            each component corresponds to the estimate of that particular
            variable. The vector is computed by flattening the array.
          h
            Convergence step. This method does not takes into consideration
            the possibility of varying the convergence step, to avoid Stiefel
            cages.
        '''
        self.__x = array(x0).ravel()
        # Re-estimate the inverse hessian at the new starting point.
        self.__B = inv(hessian(self.__f)(self.__x))
        if h is not None:
            self.__h = h
    def step(self):
        '''
        One step of the search.

        In this method, the result of the step is dependent of parameters
        calculated before (namely, the estimate of the inverse hessian), so it
        is not recomended that different investigations are used with the same
        optimizer in the same cost function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the updated
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        # Quasi-Newton update of the estimate: x(n+1) = x(n) - h*B*grad(f).
        x = self.__x
        B = self.__B
        dfx = self.__df(x)
        dx = - self.__h * dot(B, dfx)
        xn = x + dx
        # Sanity check: clip the new estimate to the allowed ranges.
        if self.ranges is not None:
            r0 = self.ranges[:, 0]
            r1 = self.ranges[:, 1]
            xn = where(xn < r0, r0, xn)
            xn = where(xn > r1, r1, xn)
        # Symmetric rank-one update of the inverse hessian estimate.
        y = self.__df(xn) - dfx
        M = dx - dot(B, y)
        dB = outer(M, M) / dot(M, y)
        self.__B = B + dB
        # Updates state; the error is the L1 norm of the displacement.
        self.__x = xn
        return xn, sum(abs(xn - x))
    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The stop
        criteria are the maximum error or the maximum number of iterations,
        whichever is reached first. Note that this is a ``__call__`` method,
        so the object is called as a function.

        :Returns:
          This method returns a tuple ``(x, e)``, where ``x`` is the best
          estimate of the minimum, and ``e`` is the estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # The error starts at emax while the loop tests against emax/2, which
        # guarantees that at least one step is always performed.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        return self.__x, e
################################################################################
# Test
if __name__ == "__main__":
    # Banana-shaped test function (Rosenbrock-like, but with unit weight on
    # the second term instead of the classic 100). Minimum at (1, 1).
    def f(xy):
        x, y = xy
        return (1.-x)**2. + (y-x*x)**2.
    # Analytic gradient of f.
    # NOTE(review): defined but never passed to the optimizers below, so
    # they estimate the gradient numerically instead.
    def df(xy):
        x, y = xy
        return array( [ -2.*(1.-x) - 4.*x*(y-x*x), 2.*(y-x*x) ])
    # Run each quasi-Newton method from the same starting point and print
    # the (estimate, error) tuple found by each one.
    dfp = DFP(f, (0., 0.), emax=1e-12)
    print dfp()
    bfgs = BFGS(f, (0., 0.), emax=1e-12)
    print bfgs()
    sr1 = SR1(f, (0., 0.), emax=1e-12)
    print sr1()
| Python |
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/__init__.py
# Makes the optm directory a package and initializes it.
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements deterministic optimization methods. Consult:
base
Basic definitions and interface with the optimization methods;
linear
Basic methods for one variable optimization;
multivar
Gradient, Newton and other multivariable optimization methods;
quasinewton
Quasi-Newton methods;
Every optimizer works in pretty much the same way. Instantiate the respective
class, using as parameter the cost function to be optimized, the first estimate
(a scalar in case of a single variable optimization, and a one-dimensional array
in case of multivariable optimization) and some other parameters. Use ``step()``
to perform one iteration of the method, use the ``__call__()`` method to perform
the search until the stop conditions are met. See each method for details.
"""
# __all__ = [ 'base', 'linear', 'multivar', 'quasinewton' ]
################################################################################
# Imports sub-packages
from peach.optm.base import * # Basic definitions
from peach.optm.linear import * # Linear and 1-D optimization
from peach.optm.multivar import * # Gradient and Newton methods
from peach.optm.quasinewton import * # Quasi-newton methods
################################################################################
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/multivar.py
# Gradient and multivariable search methods
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements basic multivariable optimizers, including gradient and
Newton searches.
"""
################################################################################
from numpy import array, dot, abs, sum, roll, ones, eye, isscalar, where, zeros
from numpy.linalg import LinAlgError, inv
from base import Optimizer, gradient, hessian
################################################################################
# Classes
################################################################################
class Direct(Optimizer):
'''
Multidimensional direct search
This optimization method is a generalization of the 1D method, using
variable swap as search direction. This results in a very simplistic and
inefficient method that should be used only when any other method fails.
'''
def __init__(self, f, x0, ranges=None, h=0.5, emax=1e-8, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A multivariable function to be optimized. The function should have
only one parameter, a multidimensional line-vector, and return the
function value, a scalar.
x0
First estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
ranges
A range of values might be passed to the algorithm, but it is not
necessary. If supplied, this parameter should be a list of ranges
for each variable of the objective function. It is specified as a
list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
start of the interval, and ``x1`` its end. Obviously, ``x0`` should
be smaller than ``x1``. It can also be given as a list with a simple
tuple in the same format. In that case, the same range will be
applied for every variable in the optimization.
h
The initial step of the search. Defaults to 0.5
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = array(x0).ravel()
n = self.__x.size
self.__h = ones((n, ))
self.__h[0] = -0.5
self.__dx = h * eye(n, 1).reshape(self.__x.shape)
# Determine ranges of the variables
if ranges is not None:
ranges = list(ranges)
if len(ranges) == 1:
ranges = array(ranges * len(x0[0]))
else:
ranges = array(ranges)
self.ranges = ranges
'''Holds the ranges for every variable. Although it is a writable
property, care should be taken in changing parameters before ending the
convergence.'''
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0, h=0.5):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
New estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
h
The initial step of the search. Defaults to 0.5
'''
self.__x = array(x0).ravel()
n = self.__x.size
self.__h = ones((n, ))
self.__h[0] = -0.5
self.__dx = h * eye(n, 1).reshape(self.__x.shape)
def step(self):
'''
One step of the search.
In this method, the result of the step is highly dependent of the steps
executed before, as the search step is updated at each call to this
method.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
estimate of the minimum, and ``e`` is the estimated error.
'''
f = self.__f
x = self.__x
dx = self.__dx
fo = f(x)
# Next estimate
x = x + dx
# Sanity check
if self.ranges is not None:
r0 = self.ranges[:, 0]
r1 = self.ranges[:, 1]
x = where(x < r0, r0, x)
x = where(x > r1, r1, x)
# Update state
fn = f(x)
if fn > fo:
self.__dx = self.__h * roll(dx, 1)
self.__x = x
return x, sum(abs(self.__dx))
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
i = 0
e = sum(abs(self.__dx))
while e > emax/2. and i < imax:
_, e = self.step()
i = i + 1
return self.__x, e
################################################################################
class Gradient(Optimizer):
'''
Gradient search
This method uses the fact that the gradient of a function points to the
direction of largest increase in the function (in general called *uphill*
direction). So, the contrary direction (*downhill*) is used as search
direction.
'''
def __init__(self, f, x0, ranges=None, df=None, h=0.1, emax=1e-5, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A multivariable function to be optimized. The function should have
only one parameter, a multidimensional line-vector, and return the
function value, a scalar.
x0
First estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
ranges
A range of values might be passed to the algorithm, but it is not
necessary. If supplied, this parameter should be a list of ranges
for each variable of the objective function. It is specified as a
list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
start of the interval, and ``x1`` its end. Obviously, ``x0`` should
be smaller than ``x1``. It can also be given as a list with a simple
tuple in the same format. In that case, the same range will be
applied for every variable in the optimization.
df
A function to calculate the gradient vector of the cost function
``f``. Defaults to ``None``, if no gradient is supplied, then it is
estimated from the cost function using Euler equations.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = array(x0).ravel()
if df is None:
self.__df = gradient(f)
else:
self.__df = df
self.__h = h
# Determine ranges of the variables
if ranges is not None:
ranges = list(ranges)
if len(ranges) == 1:
ranges = array(ranges * len(x0[0]))
else:
ranges = array(ranges)
self.ranges = ranges
'''Holds the ranges for every variable. Although it is a writable
property, care should be taken in changing parameters before ending the
convergence.'''
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0, h=None):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
New estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
'''
self.__x = array(x0).ravel()
if h is not None:
self.__h = h
def step(self):
'''
One step of the search.
In this method, the result of the step is dependent only of the given
estimated, so it can be used for different kind of investigations on the
same cost function.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
estimate of the minimum, and ``e`` is the estimated error.
'''
x = self.__x
xold = x
# New estimate
x = x - self.__h * self.__df(x)
# Sanity check
if self.ranges is not None:
r0 = self.ranges[:, 0]
r1 = self.ranges[:, 1]
x = where(x < r0, r0, x)
x = where(x > r1, r1, x)
# Update state
self.__x = x
return x, sum(abs(x - xold))
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
e = emax
i = 0
while e > emax/2. and i < imax:
_, e = self.step()
i = i + 1
return self.__x, e
################################################################################
class MomentumGradient(Optimizer):
'''
Gradient search with momentum
This method uses the fact that the gradient of a function points to the
direction of largest increase in the function (in general called *uphill*
direction). So, the contrary direction (*downhill*) is used as search
direction. A momentum term is added to avoid local minima.
'''
def __init__(self, f, x0, ranges=None, df=None, h=0.1, a=0.1, emax=1e-5, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A multivariable function to be optimized. The function should have
only one parameter, a multidimensional line-vector, and return the
function value, a scalar.
x0
First estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
ranges
A range of values might be passed to the algorithm, but it is not
necessary. If supplied, this parameter should be a list of ranges
for each variable of the objective function. It is specified as a
list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
start of the interval, and ``x1`` its end. Obviously, ``x0`` should
be smaller than ``x1``. It can also be given as a list with a simple
tuple in the same format. In that case, the same range will be
applied for every variable in the optimization.
df
A function to calculate the gradient vector of the cost function
``f``. Defaults to ``None``, if no gradient is supplied, then it is
estimated from the cost function using Euler equations.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
Defaults to 0.1.
a
Momentum term. This term is a measure of the memory of the optmizer.
The bigger it is, the more the past values influence in the outcome
of the optimization. Defaults to 0.1
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = array(x0).ravel()
self.__dx = zeros(self.__x.shape)
if df is None:
self.__df = gradient(f)
else:
self.__df = df
self.__h = h
self.__a = a
# Determine ranges of the variables
if ranges is not None:
ranges = list(ranges)
if len(ranges) == 1:
ranges = array(ranges * len(x0[0]))
else:
ranges = array(ranges)
self.ranges = ranges
'''Holds the ranges for every variable. Although it is a writable
property, care should be taken in changing parameters before ending the
convergence.'''
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0, h=None, a=None):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
New estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
If not given in this method, the old value is used.
a
Momentum term. This term is a measure of the memory of the optmizer.
The bigger it is, the more the past values influence in the outcome
of the optimization. If not given in this method, the old value is
used.
'''
self.__x = array(x0).ravel()
if h is not None:
self.__h = h
if a is not None:
self.__a = a
def step(self):
'''
One step of the search.
In this method, the result of the step is dependent only of the given
estimated, so it can be used for different kind of investigations on the
same cost function.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
estimate of the minimum, and ``e`` is the estimated error.
'''
x = self.__x
xold = x
# New estimate
dx = - self.__h * self.__df(x) + self.__a * self.__dx
x = x + dx
self.__dx = dx
# Sanity check
if self.ranges is not None:
r0 = self.ranges[:, 0]
r1 = self.ranges[:, 1]
x = where(x < r0, r0, x)
x = where(x > r1, r1, x)
# Update state
self.__x = x
return x, sum(abs(x - xold))
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
e = emax
i = 0
while e > emax/2. and i < imax:
_, e = self.step()
i = i + 1
return self.__x, e
################################################################################
class Newton(Optimizer):
'''
Newton search
This is a very effective method to find minimum points in functions. In a
very basic fashion, this method corresponds to using Newton root finding
method on f'(x). Converges *very* fast if the cost function is quadratic
of similar to it.
'''
def __init__(self, f, x0, ranges=None, df=None, hf=None, h=0.1, emax=1e-5, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A multivariable function to be optimized. The function should have
only one parameter, a multidimensional line-vector, and return the
function value, a scalar.
x0
First estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
ranges
A range of values might be passed to the algorithm, but it is not
necessary. If supplied, this parameter should be a list of ranges
for each variable of the objective function. It is specified as a
list of tuples of two values, ``(x0, x1)``, where ``x0`` is the
start of the interval, and ``x1`` its end. Obviously, ``x0`` should
be smaller than ``x1``. It can also be given as a list with a simple
tuple in the same format. In that case, the same range will be
applied for every variable in the optimization.
df
A function to calculate the gradient vector of the cost function
``f``. Defaults to ``None``, if no gradient is supplied, then it is
estimated from the cost function using Euler equations.
hf
A function to calculate the hessian matrix of the cost function
``f``. Defaults to ``None``, if no hessian is supplied, then it is
estimated from the cost function using Euler equations.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = array(x0).ravel()
if df is None:
self.__df = gradient(f)
else:
self.__df = df
if hf is None:
self.__hf = hessian(f)
else:
self.__hf = hf
self.__h = h
# Determine ranges of the variables
if ranges is not None:
ranges = list(ranges)
if len(ranges) == 1:
ranges = array(ranges * len(x0[0]))
else:
ranges = array(ranges)
self.ranges = ranges
'''Holds the ranges for every variable. Although it is a writable
property, care should be taken in changing parameters before ending the
convergence.'''
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0, h=None):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
New estimate of the minimum. Estimates can be given in any format,
but internally they are converted to a one-dimension vector, where
each component corresponds to the estimate of that particular
variable. The vector is computed by flattening the array.
h
Convergence step. This method does not takes into consideration the
possibility of varying the convergence step, to avoid Stiefel cages.
'''
self.__x = array(x0).ravel()
if h is not None:
self.__h = h
def step(self):
'''
One step of the search.
In this method, the result of the step is dependent only of the given
estimated, so it can be used for different kind of investigations on the
same cost function.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
estimate of the minimum, and ``e`` is the estimated error.
'''
x = self.__x
xold = x
df = self.__df(x)
hf = self.__hf(x)
# New estimate
try:
x = x - self.__h * dot(inv(hf), df)
except:
x = x - self.__h * df / hf
# Sanity check
if self.ranges is not None:
r0 = self.ranges[:, 0]
r1 = self.ranges[:, 1]
x = where(x < r0, r0, x)
x = where(x > r1, r1, x)
# Update state
self.__x = x
return x, sum(abs(x - xold))
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
e = emax
i = 0
while e > emax/2. and i < imax:
_, e = self.step()
i = i + 1
return self.__x, e
################################################################################
# Test
if __name__ == "__main__":
# Rosenbrock function
def f(xy):
x, y = xy
return (1.-x)**2. + (y-x*x)**2.
# Gradient of Rosenbrock function
def df(xy):
x, y = xy
return array( [ -2.*(1.-x) - 4.*x*(y - x*x), 2.*(y - x*x) ])
# Hessian of Rosenbrock function
def hf(xy):
x, y = xy
return array([ [ 2. - 4.*(y - 3.*x*x), -4.*x ],
[ -4.*x, 2. ] ])
linear = Direct(f, (0., 0.), emax=1e-12)
print linear()
grad = Gradient(f, (0., 0.), df=df, emax=1e-12)
print grad()
grad2 = Gradient(f, (0., 0.), emax=1e-12)
print grad2()
mgrad = MomentumGradient(f, (0., 0.), df=df, emax=1e-12)
print mgrad()
mgrad2 = MomentumGradient(f, (0., 0.), emax=1e-12)
print mgrad2()
newton = Newton(f, (0., 0.), df=df, hf=hf, emax=1e-12)
print newton()
newton2 = Newton(f, (0., 0.), emax=1e-12)
    print newton2()
################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: optm/linear.py
# 1D search methods
################################################################################
# Doc string, reStructuredText formatted:
__doc__ = """
This package implements basic one variable only optimizers.
"""
################################################################################
from numpy import abs, max
from base import Optimizer
################################################################################
# Classes
################################################################################
class Direct1D(Optimizer):
'''
1-D direct search.
This methods 'oscilates' around the function minimum, reducing the updating
step until it achieves the maximum error or the maximum number of steps.
This is a very inefficient method, and should be used only at times where no
other methods are able to converge (eg., if a function has a lot of
discontinuities, or similar conditions).
'''
def __init__(self, f, x0, range=None, h=0.5, emax=1e-8, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A one variable only function to be optimized. The function should
have only one parameter and return the function value.
x0
First estimate of the minimum. Since this is a linear method, this
should be a ``float`` or ``int``.
range
A range of values might be passed to the algorithm, but it is not
necessary. If supplied, this parameter should be a tuples of two
values, ``(x0, x1)``, where ``x0`` is the start of the interval, and
``x1`` its end. Obviously, ``x0`` should be smaller than ``x1``.
When this parameter is present, the algorithm will not let the
estimates fall outside the given interval.
h
The initial step of the search. Defaults to 0.5
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = x0
self.range = range
'''Holds the range for the estimates. If this attribute is set, the
algorithm will never let the estimates fall outside the given
interval.'''
self.__h = h
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0, h=None):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
The new initial value of the estimate of the minimum. Since this is
a linear method, this should be a ``float`` or ``int``.
h
The initial step of the search. Defaults to 0.5
'''
self.__x = x0
if h is not None:
self.__h = h
def step(self):
'''
One step of the search.
In this method, the result of the step is highly dependent of the steps
executed before, as the search step is updated at each call to this
method.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
estimate of the minimum, and ``e`` is the estimated error.
'''
f = self.__f
x = self.__x
fo = f(x)
# Computes next estimate
x = x + self.__h
# Sanity check
if self.range is not None:
r0, r1 = self.range
if x < r0: x = r0
if x > r1: x = r1
# Update state
fn = f(x)
if fn > fo:
self.__h = - self.__h / 2.
self.__x = x
return x, abs(self.__h)
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
i = 0
while abs(self.__h) > emax/2. and i < imax:
_, e = self.step()
i = i + 1
return self.__x, e
################################################################################
class Interpolation(Optimizer):
'''
Optimization by quadractic interpolation.
This methods takes three estimates and finds the parabolic function that
fits them, and returns as a new estimate the vertex of the parabola. The
procedure can be repeated until a good approximation is found.
'''
def __init__(self, f, x0, emax=1e-8, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A one variable only function to be optimized. The function should
have only one parameter and return the function value.
x0
First estimate of the minimum. The interpolation search needs three
estimates to approximate the parabolic function. Thus, the first
estimate must be a triple ``(xl, xm, xh)``, with the property that
``xl < xm < xh``. Be aware, however, that no checking is done -- if
the estimate doesn't correspond to this condition, in some point an
exception will be raised.
Notice that, given the nature of the estimate of the interpolation
method, it is not necessary to have a specific parameter to restrict
the range of acceptable values -- it is already embedded in the
estimate. If you need to restrict your estimate between an interval,
just use its limits as ``xl`` and ``xh`` in the estimate.
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = x0
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
The new initial value of the estimate of the minimum. The
interpolation search needs three estimates to approximate the
parabolic function. Thus, the estimate must be a triple
``(xl, xm, xh)``, with the property that ``xl < xm < xh``. Be aware,
however, that no checking is done -- if the estimate doesn't
correspond to this condition, in some point an exception will be
raised.
'''
self.__x = x0
def step(self):
'''
One step of the search.
In this method, the result of the step is dependent only of the given
estimated, so it can be used for different kind of investigations on the
same cost function.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the updated
triplet of estimates of the minimum, and ``e`` is the estimated error.
'''
x0, x1, x2 = self.__x
f = self.__f
q0 = x0 * (f(x1) - f(x2))
q1 = x1 * (f(x2) - f(x0))
q2 = x2 * (f(x0) - f(x1))
q = q0 + q1 + q2
if q == 0:
return (x0, x1, x2), max(abs(x1-x0), abs(x2-x1))
xm = 0.5 * (x0*q0 + x1*q1 + x2*q2) / (q0 + q1 + q2)
if xm < x0:
x = (xm, x0, x1)
elif x0 < xm < x1:
x = (x0, xm, x1)
elif x1 < xm < x2:
x = (x1, xm, x2)
elif x2 < xm:
x = (x1, x2, xm)
else:
x = (xm, xm, xm)
self.__x = x
return x, max(abs(xm-x0), abs(x2-xm))
def __call__(self):
'''
Transparently executes the search until the minimum is found. The stop
criteria are the maximum error or the maximum number of iterations,
whichever is reached first. Note that this is a ``__call__`` method, so
the object is called as a function. This method returns a tuple
``(x, e)``, with the best estimate of the minimum and the error.
:Returns:
This method returns a tuple ``(x, e)``, where ``x`` is the best
estimate of the minimum, and ``e`` is the estimated error.
'''
emax = self.__emax
imax = self.__imax
e = emax
i = 0
while e > emax/2. and i < imax:
x0, x1, x2 = self.__x
_, e = self.step()
if x0 == x1:
return x0, e
elif x1 == x2:
return x1, e
elif x0 == x2:
return x2, e
i = i + 1
f = self.__f
q0 = x0 * (f(x1) - f(x2))
q1 = x1 * (f(x2) - f(x0))
q2 = x2 * (f(x0) - f(x1))
xm = 0.5 * (x0*q0 + x1*q1 + x2*q2) / (q0 + q1 + q2)
return xm, e
################################################################################
class GoldenRule(Optimizer):
'''
Optimizer by the Golden Section Rule
This optimizer uses the golden rule to section an interval in search of the
minimum. Using a simple heuristic, the interval is refined until an interval
small enough to satisfy the error requirements is found.
'''
def __init__(self, f, x0, emax=1e-8, imax=1000):
'''
Initializes the optimizer.
To create an optimizer of this type, instantiate the class with the
parameters given below:
:Parameters:
f
A one variable only function to be optimized. The function should
have only one parameter and return the function value.
x0
First estimate of the minimum. The golden rule search needs two
estimates to partition the interval. Thus, the first estimate must
be a duple ``(xl, xh)``, with the property that ``xl < xh``. Be
aware, however, that no checking is done -- if the estimate doesn't
correspond to this condition, in some point an exception will be
raised.
Notice that, given the nature of the estimate of the golden rule
method, it is not necessary to have a specific parameter to restrict
the range of acceptable values -- it is already embedded in the
estimate. If you need to restrict your estimate between an interval,
just use its limits as ``xl`` and ``xh`` in the estimate.
emax
Maximum allowed error. The algorithm stops as soon as the error is
below this level. The error is absolute.
imax
Maximum number of iterations, the algorithm stops as soon this
number of iterations are executed, no matter what the error is at
the moment.
'''
Optimizer.__init__(self)
self.__f = f
self.__x = x0
self.__k = 0.6180339887498949 # Golden ratio
self.__emax = float(emax)
self.__imax = int(imax)
def __get_x(self):
return self.__x
def __set_x(self, x0):
self.restart(x0)
x = property(__get_x, __set_x)
'''The estimate of the position of the minimum.'''
def restart(self, x0):
'''
Resets the optimizer, returning to its original state, and allowing to
use a new first estimate.
:Parameters:
x0
The new value of the estimate of the minimum. The golden rule search
needs two estimates to partition the interval. Thus, the estimate
must be a duple ``(xl, xh)``, with the property that ``xl < xh``.
'''
self.__x = x0
def step(self):
    '''
    Performs a single sectioning step of the golden rule search.

    The step depends only on the current estimate, so this method can be
    used for different kinds of investigation on the same cost function.

    :Returns:
      A tuple ``(x, e)``, where ``x`` is the updated duple of estimates
      of the minimum and ``e`` is the estimated error (interval width).
    '''
    lo, up = self.__x
    ratio = self.__k
    comp = 1 - ratio
    # Interior probe points, sectioning the interval by the golden ratio.
    pl = ratio*lo + comp*up
    ph = comp*lo + ratio*up
    if self.__f(pl) > self.__f(ph):
        # Minimum cannot lie below pl -- keep the upper section.
        result = (pl, up)
        err = abs(up - pl)
    else:
        # Minimum cannot lie above ph -- keep the lower section.
        result = (lo, ph)
        err = abs(ph - lo)
    self.__x = result
    return result, err
def __call__(self):
    '''
    Runs the search to completion, as if the optimizer were a function.

    The stop criteria are the maximum error and the maximum number of
    iterations, whichever is reached first.

    :Returns:
      A tuple ``(x, e)``, where ``x`` is the best estimate of the minimum
      (the midpoint of the final interval) and ``e`` is the estimated
      error.
    '''
    tolerance = self.__emax
    limit = self.__imax
    # Seed the error with the tolerance so the loop runs at least once.
    error = tolerance
    count = 0
    while error > tolerance/2. and count < limit:
        _, error = self.step()
        count += 1
    lo, up = self.__x
    return 0.5 * (lo + up), error
################################################################################
class Fibonacci(Optimizer):
    '''
    Optimization by the Golden Rule Section, estimated by Fibonacci numbers.

    This optimizer uses the golden rule to section an interval in search of
    the minimum. Using a simple heuristic, the interval is refined until an
    interval small enough to satisfy the error requirements is found. The
    golden section is estimated at each step as the ratio of two consecutive
    Fibonacci numbers. This can be useful in situations where only integer
    numbers should be used.
    '''
    def __init__(self, f, x0, emax=1e-8, imax=1000):
        '''
        Initializes the optimizer.

        :Parameters:
          f
            A one variable only function to be optimized. The function
            should have only one parameter and return the function value.
          x0
            First estimate of the minimum, a duple ``(xl, xh)`` with the
            property that ``xl < xh``. No checking is done -- if the
            estimate doesn't correspond to this condition, at some point an
            exception will be raised. The interval itself restricts the
            range of acceptable values, so no separate range parameter is
            needed.
          emax
            Maximum allowed absolute error; the algorithm stops as soon as
            the error is below this level.
          imax
            Maximum number of iterations; the algorithm stops as soon as
            this number of iterations is executed, no matter what the
            error is at the moment.
        '''
        Optimizer.__init__(self)
        self.__f = f
        self.__x = x0
        # Two consecutive Fibonacci numbers; their ratio converges to the
        # golden ratio as the search progresses.
        self.__k1 = 1.
        self.__k2 = 1.
        self.__emax = float(emax)
        self.__imax = int(imax)

    def __get_x(self):
        # Accessor: current interval estimate of the minimum.
        return self.__x

    def __set_x(self, x0):
        # Assigning to ``x`` restarts the search from a new interval.
        self.restart(x0)

    # Property added for consistency with the golden-rule optimizer above.
    x = property(__get_x, __set_x,
                 doc='The estimate of the position of the minimum.')

    def restart(self, x0):
        '''
        Resets the optimizer, returning to its original state, and allowing
        to use a new first estimate.

        :Parameters:
          x0
            The new estimate of the minimum, a duple ``(xl, xh)`` with the
            property that ``xl < xh``. No checking is done.
        '''
        # BUGFIX: reset the Fibonacci sequence too. Previously a restarted
        # search kept the refined ratio from earlier steps, so it did not
        # return to the optimizer's original state as documented.
        self.__k1 = 1.
        self.__k2 = 1.
        self.__x = x0

    def step(self):
        '''
        One step of the search.

        The result of the step is highly dependent on the steps executed
        before, as the estimate of the golden ratio is updated at each call
        to this method.

        :Returns:
          A tuple ``(x, e)``, where ``x`` is the updated duple of estimates
          of the minimum, and ``e`` is the estimated error.
        '''
        x0, x1 = self.__x
        f = self.__f
        k1 = self.__k1
        k2 = self.__k2
        # Advance the Fibonacci sequence; k1/k2 approaches the golden ratio.
        self.__k1 = k2
        self.__k2 = k1 + k2
        k = k1 / k2
        k1 = 1 - k
        xl = k*x0 + k1*x1
        xh = k1*x0 + k*x1
        if f(xl) > f(xh):
            # Minimum cannot lie below xl -- keep the upper section.
            x = (xl, x1)
            e = abs(x1 - xl)
        else:
            # Minimum cannot lie above xh -- keep the lower section.
            x = (x0, xh)
            e = abs(xh - x0)
        self.__x = x
        return x, e

    def __call__(self):
        '''
        Transparently executes the search until the minimum is found. The
        stop criteria are the maximum error or the maximum number of
        iterations, whichever is reached first. Note that this is a
        ``__call__`` method, so the object is called as a function.

        :Returns:
          A tuple ``(x, e)``, where ``x`` is the best estimate of the
          minimum (the midpoint of the final interval), and ``e`` is the
          estimated error.
        '''
        emax = self.__emax
        imax = self.__imax
        # Seed the error with the tolerance so the loop runs at least once.
        e = emax
        i = 0
        while e > emax/2. and i < imax:
            _, e = self.step()
            i = i + 1
        xl, xh = self.__x
        return 0.5 * (xl + xh), e
################################################################################
# Test
if __name__ == "__main__":
    # Smoke test: minimize the same one-dimensional cost function with each
    # optimizer defined in this module and print (minimum, error) tuples.
    # NOTE: Python 2 print statements -- this module targets Python 2.
    # Rosenbrock function (its one-dimensional restriction; minimum at x = 1)
    def f(x):
        return (1.-x)**2. + (1.-x*x)**2.
    linear = Direct1D(f, 3.21345, emax=1e-12)   # single-scalar initial estimate
    print linear()
    interp = Interpolation(f, (0., 0.75, 1.5), emax=1e-12)   # bracketing triple
    print interp()
    golden = GoldenRule(f, (0.75, 1.4), emax=1e-12)   # bracketing interval
    print golden()
    fibo = Fibonacci(f, (0.75, 1.4), emax=1e-12)   # bracketing interval
    print fibo()
| Python |
#!/usr/bin/env python
# Django management entry point (old-style, Django <= 1.3): delegates the
# command line (runserver, syncdb, test, ...) to ``execute_manager`` using
# this project's ``settings`` module.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django import forms
from django.forms import ModelForm
from models import *
class FormEvento(ModelForm):
    # Form for creating/editing an ``Evento``.
    # Accepts two- or four-digit-year date input; renders as dd/mm/YYYY.
    datahora = forms.DateTimeField(
        widget=forms.DateInput(format='%d/%m/%Y'),
        input_formats=['%d/%m/%y', '%d/%m/%Y'])
    categoria = forms.ModelChoiceField(queryset=Categoria.objects.all(), empty_label="Selecione")
    # One choice field per category id (1, 2, 3) -- presumably the three
    # measurement categories used elsewhere in the project; TODO confirm
    # which ids map to which category in the fixtures.
    valor = forms.ModelChoiceField(queryset=Evento.objects.filter(categoriaid=1), empty_label="Selecione")
    valor2 = forms.ModelChoiceField(queryset=Evento.objects.filter(categoriaid=2), empty_label="Selecione")
    valor3 = forms.ModelChoiceField(queryset=Evento.objects.filter(categoriaid=3), empty_label="Selecione")

    class Meta:
        model = Evento
        # Only these fields are generated from the model; ``categoria``,
        # ``valor2`` and ``valor3`` are declared above but not listed here.
        fields = ('datahora', 'valor')
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
class Categoria(models.Model):
    # A named measurement category.
    nome = models.CharField(max_length=50)
    # ``maximo``/``minimo`` are presumably the valid value range for events
    # in this category -- TODO confirm against the code that reads them.
    maximo = models.FloatField()
    minimo = models.FloatField()

    class Meta:
        # Categories are listed alphabetically by name.
        ordering = ('nome',)

    def __unicode__(self):
        return self.nome
class Evento(models.Model):
    # A single recorded measurement: a value at a timestamp for a category.
    # ``datetime.now`` is passed as a callable, so it is evaluated at save
    # time, not at class-definition time.
    datahora = models.DateTimeField(default=datetime.now, blank=True)
    valor = models.FloatField()
    # Despite the ``id`` suffix, this is a ForeignKey (old pre-1.9 Django
    # style without on_delete); the DB column will be ``categoriaid_id``.
    categoriaid = models.ForeignKey('Categoria')

    class Meta:
        # Events are listed chronologically.
        ordering = ('datahora',)

    def __unicode__(self):
        return str(self.categoriaid) + ' de ' + str(self.datahora)
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated ``failUnlessEqual`` alias
        # (deprecated since Python 2.7 / removed in modern unittest).
        self.assertEqual(1 + 1, 2)


# Extra doctests collected by the test runner via the ``__test__`` protocol.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
#!/usr/bin/python
from pychartdir import *
def GerarGrafico(valor, tempo, evento):
    '''
    Render a line chart of ``valor`` over ``tempo`` and write it as a PNG.

    :Parameters:
      valor
        Iterable of data values for the line series.
      tempo
        Iterable of x-axis labels; each element is converted with ``str``.
      evento
        Destination selector: 1 writes the temperature chart, 2 the
        humidity chart, anything else the solar-radiation chart.
    '''
    # Copy the series and coerce the labels to strings (replaces the
    # previous manual append loops).
    listaValor = list(valor)
    listaTempo = [str(x) for x in tempo]
    # Create a 490 x 300 pixel chart with the plot area at (28, 10),
    # sized 448 x 260 pixels.
    c = XYChart(490, 300)
    c.setPlotArea(28, 10, 448, 260)
    c.addLineLayer(listaValor)
    c.xAxis().setLabels(listaTempo)
    # Show every label on the x-axis.
    c.xAxis().setLabelStep(1)
    # Write the chart to the file matching the selected event type.
    if evento == 1:
        c.makeChart("C:/fotometria/media/graficos/graficoTemperatura.png")
    elif evento == 2:
        c.makeChart("C:/fotometria/media/graficos/graficoUmidade.png")
    else:
        c.makeChart("C:/fotometria/media/graficos/graficoRadiacao.png")
def ExibirGrafico(valor, tempo, evento):
    '''
    Render a line chart of ``valor`` over ``tempo`` for display, writing it
    to the "Consulta" PNG files.

    :Parameters:
      valor
        Iterable of data values for the line series.
      tempo
        Iterable of x-axis labels; each element is converted with ``str``.
      evento
        Destination selector: 1 writes the temperature chart, 2 the
        humidity chart, anything else the solar-radiation chart.
    '''
    # Copy the series and coerce the labels to strings (replaces the
    # previous manual append loops).
    listaValor = list(valor)
    listaTempo = [str(x) for x in tempo]
    # Create a 490 x 300 pixel chart with the plot area at (28, 10),
    # sized 448 x 260 pixels.
    c = XYChart(490, 300)
    c.setPlotArea(28, 10, 448, 260)
    c.addLineLayer(listaValor)
    c.xAxis().setLabels(listaTempo)
    # Show every label on the x-axis.
    c.xAxis().setLabelStep(1)
    # Write the chart to the file matching the selected event type.
    if evento == 1:
        c.makeChart("C:/fotometria/media/graficos/ConstultaGraficotemperatura.png")
    elif evento == 2:
        c.makeChart("C:/fotometria/media/graficos/ConstultaGraficoumidade-relativa.png")
    else:
        c.makeChart("C:/fotometria/media/graficos/ConstultaGraficointensidade-de-radiacao-solar.png")
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.