file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
render.py | import collections
import random
import pyglet
from entity import Component
from fov import InFOV
from generator import LayoutGenerator
from hud import HUD
from light import LightOverlay
from message import LastMessagesView
from position import Position
from temp import floor_tex, get_wall_tex, dungeon_tex
from util import event_property
class TextureGroup(pyglet.graphics.TextureGroup):
"""A batch group that binds texture and sets mag filter to NEAREST not to screw our pretty pixel art"""
def set_state(self):
super(TextureGroup, self).set_state()
pyglet.gl.glTexParameteri(self.texture.target, pyglet.gl.GL_TEXTURE_MAG_FILTER, pyglet.gl.GL_NEAREST)
class ZoomGroup(pyglet.graphics.Group):
def __init__(self, zoom, parent=None):
super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class LayoutRenderable(Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
for entity in self._level.position_system.get_entities_at(x, y):
|
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move)
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self._batch.draw()
def dispose(self):
for anim in tuple(self._animations):
anim.cancel()
assert not self._animations
for sprite in self._sprites.values():
sprite.delete()
self._sprites.clear()
for sprites in self._memory.values():
for sprite in sprites:
sprite.delete()
self._memory.clear()
if self._level_vlist:
self._level_vlist.delete()
self._level_vlist = None
if self._light_overlay:
self._light_overlay.delete()
self._light_overlay = None
self._last_messages_view.delete()
self._hud.delete()
def add_animation(self, animation):
self._animations.add(animation)
animation.push_handlers(on_finish=self._animations.remove)
def animate_damage(self, x, y, dmg):
x = (x * 8 + random.randint(2, 6)) * self.zoom
start_y = (y * 8 + random.randint(0, 4)) * self.zoom
label = pyglet.text.Label('-' + str(dmg), font_name='eight2empire', color=(255, 0, 0, 255),
x=x, y=start_y, anchor_x='center', anchor_y='bottom',
batch=self._batch, group=self._digits_group)
anim = Animation(1)
@anim.event
def on_update(animation, dt, label=label, start_y=start_y, zoom=self.zoom):
ratio = animation.get_elapsed_ratio()
label.y = start_y + 12 * ratio * zoom
label.color = (255, 0, 0, int((1.0 - ratio) * 255))
@anim.event
def on_finish(animation, label=label):
label.delete()
self.add_animation(anim)
| renderable = entity.get(LayoutRenderable)
if renderable:
tile = renderable.tile
break | conditional_block |
render.py | import collections
import random
import pyglet
from entity import Component
from fov import InFOV
from generator import LayoutGenerator
from hud import HUD
from light import LightOverlay
from message import LastMessagesView
from position import Position
from temp import floor_tex, get_wall_tex, dungeon_tex
from util import event_property
class TextureGroup(pyglet.graphics.TextureGroup):
"""A batch group that binds texture and sets mag filter to NEAREST not to screw our pretty pixel art"""
def set_state(self):
super(TextureGroup, self).set_state()
pyglet.gl.glTexParameteri(self.texture.target, pyglet.gl.GL_TEXTURE_MAG_FILTER, pyglet.gl.GL_NEAREST)
class ZoomGroup(pyglet.graphics.Group):
def __init__(self, zoom, parent=None):
super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class | (Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
for entity in self._level.position_system.get_entities_at(x, y):
renderable = entity.get(LayoutRenderable)
if renderable:
tile = renderable.tile
break
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move)
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self._batch.draw()
def dispose(self):
for anim in tuple(self._animations):
anim.cancel()
assert not self._animations
for sprite in self._sprites.values():
sprite.delete()
self._sprites.clear()
for sprites in self._memory.values():
for sprite in sprites:
sprite.delete()
self._memory.clear()
if self._level_vlist:
self._level_vlist.delete()
self._level_vlist = None
if self._light_overlay:
self._light_overlay.delete()
self._light_overlay = None
self._last_messages_view.delete()
self._hud.delete()
def add_animation(self, animation):
self._animations.add(animation)
animation.push_handlers(on_finish=self._animations.remove)
def animate_damage(self, x, y, dmg):
x = (x * 8 + random.randint(2, 6)) * self.zoom
start_y = (y * 8 + random.randint(0, 4)) * self.zoom
label = pyglet.text.Label('-' + str(dmg), font_name='eight2empire', color=(255, 0, 0, 255),
x=x, y=start_y, anchor_x='center', anchor_y='bottom',
batch=self._batch, group=self._digits_group)
anim = Animation(1)
@anim.event
def on_update(animation, dt, label=label, start_y=start_y, zoom=self.zoom):
ratio = animation.get_elapsed_ratio()
label.y = start_y + 12 * ratio * zoom
label.color = (255, 0, 0, int((1.0 - ratio) * 255))
@anim.event
def on_finish(animation, label=label):
label.delete()
self.add_animation(anim)
| LayoutRenderable | identifier_name |
hashsplit.go | // Package hashsplit implements content-based splitting of byte streams.
package hashsplit
import (
"errors"
"io"
"math/bits"
"github.com/chmduquesne/rollinghash/buzhash32"
)
const (
defaultSplitBits = 13
windowSize = 64
defaultMinSize = windowSize
)
// Splitter hashsplits a byte sequence into chunks.
// It implements the io.WriteCloser interface.
// Create a new Splitter with NewSplitter.
//
// Hashsplitting is a way of dividing a byte stream into pieces
// based on the stream's content rather than on any predetermined chunk size.
// As the Splitter reads the stream it maintains a rolling checksum of the last several bytes.
// A chunk boundary occurs when the rolling checksum has enough trailing bits set to zero
// (where "enough" is a configurable setting that determines the average chunk size).
//
// Hashsplitting has benefits when it comes to representing multiple,
// slightly different versions of the same data.
// Consider, for example, the problem of adding EXIF tags to a JPEG image file.
// The tags appear near the beginning of the file, and the bulk of the image data follows.
// If the file were divided into chunks at (say) 8-kilobyte boundaries,
// then adding EXIF data near the beginning would alter every following chunk,
// except in the lucky case where the size of the added data is an exact multiple of 8kb.
// With hashsplitting, only the chunks in the vicinity of the change are affected.
//
// Hashsplitting is used to dramatically reduce storage and bandwidth requirements
// in projects like rsync, bup, and perkeep.
type Splitter struct {
// MinSize is the minimum chunk size.
// Only the final chunk may be smaller than this.
// This should always be >= 64,
// which is the rolling checksum "window size."
// If it's less than the size of the checksum window,
// then the same window can span multiple chunks,
// meaning a chunk boundary is not independent of the preceding chunk.
// If you leave this set to zero,
// 64 is what you'll get.
// If you really mean "I want no minimum,"
// set this to 1.
MinSize int
// SplitBits is the number of trailing zero bits in the rolling checksum required to produce a chunk.
// The default (what you get if you leave it set to zero) is 13,
// which means a chunk boundary occurs on average once every 8,192 bytes.
//
// (But thanks to math, that doesn't mean that 8,192 is the median chunk size.
// The median chunk size is actually the logarithm, base (2^SplitBits-1)/(2^SplitBits), of 0.5.
// That makes the median chunk size 5,678 when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func | (r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
}
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 {
return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ {
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return nil, err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = nil // help the gc reclaim memory sooner, maybe
}
}
// Don't necessarily return the highest node in tb.levels.
// We can prune any would-be root nodes that have only one child.
// If we _are_ going to return tb.levels[len(tb.levels)-1],
// we have to call tb.F on it.
// If we're not, we don't:
// tb.F has already been called on all other nodes.
if len(tb.levels) == 1 {
var result Node = tb.levels[0]
if tb.F != nil {
return tb.F(tb.levels[0])
}
return result, nil
}
top := tb.levels[len(tb.levels)-1]
if len(top.Nodes) > 1 {
if tb.F != nil {
return tb.F(top)
}
return top, nil
}
var (
root Node = top
err error
)
for root.NumChildren() == 1 {
root, err = root.Child(0)
if err != nil {
return nil, err
}
}
return root, nil
}
// ErrNotFound is the error returned by Seek when the seek position lies outside the given node's range.
var ErrNotFound = errors.New("not found")
// Seek finds the level-0 node representing the given byte position
// (i.e., the one where Offset <= pos < Offset+Size).
func Seek(n Node, pos uint64) (Node, error) {
if pos < n.Offset() || pos >= (n.Offset()+n.Size()) {
return nil, ErrNotFound
}
num := n.NumChildren()
if num == 0 {
return n, nil
}
// TODO: if a Node kept track of its children's offsets,
// this loop could be replaced with a sort.Search call.
for i := 0; i < num; i++ {
child, err := n.Child(i)
if err != nil {
return nil, err
}
if pos >= (child.Offset() + child.Size()) {
continue
}
return Seek(child, pos)
}
// With a properly formed tree of nodes this will not be reached.
return nil, ErrNotFound
}
| Split | identifier_name |
hashsplit.go | // Package hashsplit implements content-based splitting of byte streams.
package hashsplit
import (
"errors"
"io"
"math/bits"
"github.com/chmduquesne/rollinghash/buzhash32"
)
const (
defaultSplitBits = 13
windowSize = 64
defaultMinSize = windowSize
)
// Splitter hashsplits a byte sequence into chunks.
// It implements the io.WriteCloser interface.
// Create a new Splitter with NewSplitter.
//
// Hashsplitting is a way of dividing a byte stream into pieces
// based on the stream's content rather than on any predetermined chunk size.
// As the Splitter reads the stream it maintains a rolling checksum of the last several bytes.
// A chunk boundary occurs when the rolling checksum has enough trailing bits set to zero
// (where "enough" is a configurable setting that determines the average chunk size).
//
// Hashsplitting has benefits when it comes to representing multiple,
// slightly different versions of the same data.
// Consider, for example, the problem of adding EXIF tags to a JPEG image file.
// The tags appear near the beginning of the file, and the bulk of the image data follows.
// If the file were divided into chunks at (say) 8-kilobyte boundaries,
// then adding EXIF data near the beginning would alter every following chunk,
// except in the lucky case where the size of the added data is an exact multiple of 8kb.
// With hashsplitting, only the chunks in the vicinity of the change are affected.
//
// Hashsplitting is used to dramatically reduce storage and bandwidth requirements
// in projects like rsync, bup, and perkeep.
type Splitter struct {
// MinSize is the minimum chunk size.
// Only the final chunk may be smaller than this.
// This should always be >= 64,
// which is the rolling checksum "window size."
// If it's less than the size of the checksum window,
// then the same window can span multiple chunks,
// meaning a chunk boundary is not independent of the preceding chunk.
// If you leave this set to zero,
// 64 is what you'll get.
// If you really mean "I want no minimum,"
// set this to 1.
MinSize int
// SplitBits is the number of trailing zero bits in the rolling checksum required to produce a chunk.
// The default (what you get if you leave it set to zero) is 13,
// which means a chunk boundary occurs on average once every 8,192 bytes.
//
// (But thanks to math, that doesn't mean that 8,192 is the median chunk size.
// The median chunk size is actually the logarithm, base (2^SplitBits-1)/(2^SplitBits), of 0.5.
// That makes the median chunk size 5,678 when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func Split(r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
}
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 {
return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ {
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return nil, err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = nil // help the gc reclaim memory sooner, maybe
}
}
// Don't necessarily return the highest node in tb.levels.
// We can prune any would-be root nodes that have only one child.
// If we _are_ going to return tb.levels[len(tb.levels)-1],
// we have to call tb.F on it.
// If we're not, we don't:
// tb.F has already been called on all other nodes.
if len(tb.levels) == 1 {
var result Node = tb.levels[0]
if tb.F != nil {
return tb.F(tb.levels[0])
}
return result, nil
}
top := tb.levels[len(tb.levels)-1]
if len(top.Nodes) > 1 {
if tb.F != nil {
return tb.F(top)
}
return top, nil
}
var (
root Node = top
err error
)
for root.NumChildren() == 1 {
root, err = root.Child(0)
if err != nil {
return nil, err
}
}
return root, nil
}
// ErrNotFound is the error returned by Seek when the seek position lies outside the given node's range.
var ErrNotFound = errors.New("not found")
// Seek finds the level-0 node representing the given byte position
// (i.e., the one where Offset <= pos < Offset+Size).
func Seek(n Node, pos uint64) (Node, error) {
if pos < n.Offset() || pos >= (n.Offset()+n.Size()) |
num := n.NumChildren()
if num == 0 {
return n, nil
}
// TODO: if a Node kept track of its children's offsets,
// this loop could be replaced with a sort.Search call.
for i := 0; i < num; i++ {
child, err := n.Child(i)
if err != nil {
return nil, err
}
if pos >= (child.Offset() + child.Size()) {
continue
}
return Seek(child, pos)
}
// With a properly formed tree of nodes this will not be reached.
return nil, ErrNotFound
}
| {
return nil, ErrNotFound
} | conditional_block |
hashsplit.go | // Package hashsplit implements content-based splitting of byte streams.
package hashsplit
import (
"errors"
"io"
"math/bits"
"github.com/chmduquesne/rollinghash/buzhash32"
)
const (
defaultSplitBits = 13
windowSize = 64
defaultMinSize = windowSize
)
// Splitter hashsplits a byte sequence into chunks.
// It implements the io.WriteCloser interface.
// Create a new Splitter with NewSplitter.
//
// Hashsplitting is a way of dividing a byte stream into pieces
// based on the stream's content rather than on any predetermined chunk size.
// As the Splitter reads the stream it maintains a rolling checksum of the last several bytes.
// A chunk boundary occurs when the rolling checksum has enough trailing bits set to zero
// (where "enough" is a configurable setting that determines the average chunk size).
//
// Hashsplitting has benefits when it comes to representing multiple,
// slightly different versions of the same data.
// Consider, for example, the problem of adding EXIF tags to a JPEG image file.
// The tags appear near the beginning of the file, and the bulk of the image data follows.
// If the file were divided into chunks at (say) 8-kilobyte boundaries,
// then adding EXIF data near the beginning would alter every following chunk,
// except in the lucky case where the size of the added data is an exact multiple of 8kb.
// With hashsplitting, only the chunks in the vicinity of the change are affected.
//
// Hashsplitting is used to dramatically reduce storage and bandwidth requirements
// in projects like rsync, bup, and perkeep.
type Splitter struct {
// MinSize is the minimum chunk size.
// Only the final chunk may be smaller than this.
// This should always be >= 64,
// which is the rolling checksum "window size."
// If it's less than the size of the checksum window,
// then the same window can span multiple chunks,
// meaning a chunk boundary is not independent of the preceding chunk.
// If you leave this set to zero,
// 64 is what you'll get.
// If you really mean "I want no minimum,"
// set this to 1.
MinSize int
// SplitBits is the number of trailing zero bits in the rolling checksum required to produce a chunk.
// The default (what you get if you leave it set to zero) is 13,
// which means a chunk boundary occurs on average once every 8,192 bytes.
//
// (But thanks to math, that doesn't mean that 8,192 is the median chunk size.
// The median chunk size is actually the logarithm, base (2^SplitBits-1)/(2^SplitBits), of 0.5.
// That makes the median chunk size 5,678 when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func Split(r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error |
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 {
return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ {
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return nil, err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = nil // help the gc reclaim memory sooner, maybe
}
}
// Don't necessarily return the highest node in tb.levels.
// We can prune any would-be root nodes that have only one child.
// If we _are_ going to return tb.levels[len(tb.levels)-1],
// we have to call tb.F on it.
// If we're not, we don't:
// tb.F has already been called on all other nodes.
if len(tb.levels) == 1 {
var result Node = tb.levels[0]
if tb.F != nil {
return tb.F(tb.levels[0])
}
return result, nil
}
top := tb.levels[len(tb.levels)-1]
if len(top.Nodes) > 1 {
if tb.F != nil {
return tb.F(top)
}
return top, nil
}
var (
root Node = top
err error
)
for root.NumChildren() == 1 {
root, err = root.Child(0)
if err != nil {
return nil, err
}
}
return root, nil
}
// ErrNotFound is the error returned by Seek when the seek position lies outside the given node's range.
var ErrNotFound = errors.New("not found")
// Seek finds the level-0 node representing the given byte position
// (i.e., the one where Offset <= pos < Offset+Size).
func Seek(n Node, pos uint64) (Node, error) {
if pos < n.Offset() || pos >= (n.Offset()+n.Size()) {
return nil, ErrNotFound
}
num := n.NumChildren()
if num == 0 {
return n, nil
}
// TODO: if a Node kept track of its children's offsets,
// this loop could be replaced with a sort.Search call.
for i := 0; i < num; i++ {
child, err := n.Child(i)
if err != nil {
return nil, err
}
if pos >= (child.Offset() + child.Size()) {
continue
}
return Seek(child, pos)
}
// With a properly formed tree of nodes this will not be reached.
return nil, ErrNotFound
}
| {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
} | identifier_body |
hashsplit.go | // Package hashsplit implements content-based splitting of byte streams.
package hashsplit
import (
"errors"
"io"
"math/bits"
"github.com/chmduquesne/rollinghash/buzhash32"
)
const (
defaultSplitBits = 13
windowSize = 64
defaultMinSize = windowSize
)
// Splitter hashsplits a byte sequence into chunks.
// It implements the io.WriteCloser interface.
// Create a new Splitter with NewSplitter.
//
// Hashsplitting is a way of dividing a byte stream into pieces
// based on the stream's content rather than on any predetermined chunk size.
// As the Splitter reads the stream it maintains a rolling checksum of the last several bytes.
// A chunk boundary occurs when the rolling checksum has enough trailing bits set to zero
// (where "enough" is a configurable setting that determines the average chunk size).
//
// Hashsplitting has benefits when it comes to representing multiple,
// slightly different versions of the same data.
// Consider, for example, the problem of adding EXIF tags to a JPEG image file.
// The tags appear near the beginning of the file, and the bulk of the image data follows.
// If the file were divided into chunks at (say) 8-kilobyte boundaries,
// then adding EXIF data near the beginning would alter every following chunk,
// except in the lucky case where the size of the added data is an exact multiple of 8kb.
// With hashsplitting, only the chunks in the vicinity of the change are affected.
//
// Hashsplitting is used to dramatically reduce storage and bandwidth requirements
// in projects like rsync, bup, and perkeep.
type Splitter struct {
// MinSize is the minimum chunk size.
// Only the final chunk may be smaller than this.
// This should always be >= 64,
// which is the rolling checksum "window size."
// If it's less than the size of the checksum window,
// then the same window can span multiple chunks,
// meaning a chunk boundary is not independent of the preceding chunk.
// If you leave this set to zero,
// 64 is what you'll get.
// If you really mean "I want no minimum,"
// set this to 1.
MinSize int
// SplitBits is the number of trailing zero bits in the rolling checksum required to produce a chunk.
// The default (what you get if you leave it set to zero) is 13,
// which means a chunk boundary occurs on average once every 8,192 bytes.
//
// (But thanks to math, that doesn't mean that 8,192 is the median chunk size.
// The median chunk size is actually the logarithm, base (2^SplitBits-1)/(2^SplitBits), of 0.5.
// That makes the median chunk size 5,678 when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func Split(r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
}
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 { | var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return nil, err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = nil // help the gc reclaim memory sooner, maybe
}
}
// Don't necessarily return the highest node in tb.levels.
// We can prune any would-be root nodes that have only one child.
// If we _are_ going to return tb.levels[len(tb.levels)-1],
// we have to call tb.F on it.
// If we're not, we don't:
// tb.F has already been called on all other nodes.
if len(tb.levels) == 1 {
var result Node = tb.levels[0]
if tb.F != nil {
return tb.F(tb.levels[0])
}
return result, nil
}
top := tb.levels[len(tb.levels)-1]
if len(top.Nodes) > 1 {
if tb.F != nil {
return tb.F(top)
}
return top, nil
}
var (
root Node = top
err error
)
for root.NumChildren() == 1 {
root, err = root.Child(0)
if err != nil {
return nil, err
}
}
return root, nil
}
// ErrNotFound is the error returned by Seek when the seek position lies outside the given node's range.
var ErrNotFound = errors.New("not found")
// Seek finds the level-0 node representing the given byte position
// (i.e., the one where Offset <= pos < Offset+Size).
func Seek(n Node, pos uint64) (Node, error) {
if pos < n.Offset() || pos >= (n.Offset()+n.Size()) {
return nil, ErrNotFound
}
num := n.NumChildren()
if num == 0 {
return n, nil
}
// TODO: if a Node kept track of its children's offsets,
// this loop could be replaced with a sort.Search call.
for i := 0; i < num; i++ {
child, err := n.Child(i)
if err != nil {
return nil, err
}
if pos >= (child.Offset() + child.Size()) {
continue
}
return Seek(child, pos)
}
// With a properly formed tree of nodes this will not be reached.
return nil, ErrNotFound
} | return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ { | random_line_split |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn | <R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
}
| sample | identifier_name |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint |
}
| {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
} | identifier_body |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are | lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
} | /// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")] | random_line_split |
bigrand.rs | //! Randomization of big integers
use rand::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use rand::prelude::*;
use rand::Rng;
use crate::BigInt;
use crate::BigUint;
use crate::Sign::*;
use crate::big_digit::BigDigit;
use crate::bigint::{into_magnitude, magnitude};
use crate::integer::Integer;
#[cfg(feature = "prime")]
use num_iter::range_step;
use num_traits::Zero;
#[cfg(feature = "prime")]
use num_traits::{FromPrimitive, ToPrimitive};
#[cfg(feature = "prime")]
use crate::prime::probably_prime;
pub trait RandBigInt {
/// Generate a random `BigUint` of the given bit size.
fn gen_biguint(&mut self, bit_size: usize) -> BigUint;
/// Generate a random BigInt of the given bit size.
fn gen_bigint(&mut self, bit_size: usize) -> BigInt;
/// Generate a random `BigUint` less than the given bound. Fails
/// when the bound is zero.
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint;
/// Generate a random `BigUint` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 | else {
// Here b==1, because b cannot be zero.
bytes[0] |= 1;
if bytes_len > 1 {
bytes[1] |= 0x80;
}
}
// Make the value odd since an even number this large certainly isn't prime.
bytes[bytes_len - 1] |= 1u8;
let mut p = BigUint::from_bytes_be(&bytes);
// must always be a u64, as the SMALL_PRIMES_PRODUCT is a u64
let rem = (&p % &*SMALL_PRIMES_PRODUCT).to_u64().unwrap();
'next: for delta in range_step(0, 1 << 20, 2) {
let m = rem + delta;
for prime in &SMALL_PRIMES {
if m % u64::from(*prime) == 0 && (bit_size > 6 || m != u64::from(*prime)) {
continue 'next;
}
}
if delta > 0 {
p += BigUint::from_u64(delta).unwrap();
}
break;
}
// There is a tiny possibility that, by adding delta, we caused
// the number to be one bit too long. Thus we check bit length here.
if p.bits() == bit_size && probably_prime(&p, 20) {
return p;
}
}
}
}
| {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} | conditional_block |
cartracker.py | import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import imutils
from collections import deque
import argparse
import pandas as pd
import random
cap = cv2.VideoCapture('cardi.MP4')
ap = argparse.ArgumentParser()
| # create background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2()
# where the centroids will be stored
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
#setting variables before the image processing
frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = int(width)
height = int(height)
print(frames_count, fps, width, height)
# creates a pandas data frame with the number of rows the same length as frame count
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frames"
framenumber = 0 # keeps track of current frame
carids = [] # blank list to add car ids
totalcars = 0 # keeps track of total cars
#capturing data
while(True):
# Capture two frames
ret, frame1 = cap.read() # first image
time.sleep(1/25) # slight delay
ret, frame2 = cap.read() # second image
image = cv2.resize(frame1, (0, 0), None, 1,1)
#getting the difference as the basis for movement
diff = cv2.absdiff(frame1,frame2)
mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
th =25
imask = mask > th
canvas = np.zeros_like(frame2, np.uint8)
canvas[imask] = frame1[imask]
mask = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
#canvas = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# transforms
fgmask = fgbg.apply(mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30,30))
#dilation = cv2.dilate(fgmask, kernel)
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_ERODE, kernel)
mask =closing
# variable for contours
ret,thresh = cv2.threshold(mask,0,255,0)
# creates contours/blobs
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
# draw contours
cv2.drawContours(mask, hull, -1, (0, 255, 0), 2)
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
# line created to stop counting contours, needed as cars in distance become one big contour
lineypos = 400
cv2.line(image, (-100, lineypos), (width, -120), (255, 0, 0), 3) # blue
lineypos2 = -700
cv2.line(image, (-150, lineypos2), (width, 700), (0, 255, 0), 3) # green
cv2.line(image, (-150, -100), (width, 1800), (255, 255,0), 3)
#creating centroids and boxes
for j in range(len(contours)):
if hierarchy[0, j, 3] == -1:
cnt=contours[j]
area = cv2.contourArea(cnt)
if 500 < area < 50000:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
#getting variables for the centroids
cx = int(x + w/2)
cy = int(y + h/2)
cen = (cx,cy)
cv2.circle(image, (cx,cy), 7, (255,0,0), -1)
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.5, (0, 0, 255), 1)
cxx[j] = cx
cyy[j] = cy
pts.appendleft(cen)
#this is for plotting the past centroid positions
for i in np.arange(1, len(pts)):
# if either of the tracked points are None, ignore
# them
if pts[i - 1] is None or pts[i] is None:
continue
# draw the centroid tracker
cv2.circle(image, (pts[i - 1]), 2, (0,0,255), -1)
#drawing hte current centroid
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
minx_index2 = []
miny_index2 = []
maxrad = 30
# if there are centroids in the specified area
if len(cxx):
if not carids: # if carids is empty
# loops through all centroids
for i in range(len(cxx)):
carids.append(i)
df[str(carids[i])] = ""
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1
else:
dx = np.zeros((len(cxx), len(carids)))
dy = np.zeros((len(cyy), len(carids)))
for i in range(len(cxx)):
for j in range(len(carids)):
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
# if centroid is not in the minindex list then another car needs to be added
if i not in minx_index2 and miny_index2:
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# checks if current centroid exists but previous centroid does not
# new car to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
# The section below labels the centroids on screen
currentcars = 0 # current cars on screen
currentcarsindex = [] # current cars on screen carid index
for i in range(len(carids)): # loops through all carids
if df.at[int(framenumber), str(carids[i])] != '':
# checks the current frame to see which car ids are active
# by checking in centroid exists on current frame for certain car id
currentcars = currentcars + 1 # adds another to current cars on screen
currentcarsindex.append(i) # adds car ids to current cars on screen
for i in range(currentcars): # loops through all current car ids on screen
# grabs centroid of certain carid for current frame
curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]
# grabs centroid of certain carid for previous frame
oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]
if curcent: # if there is a current centroid
# On-screen text for current centroid
#cv2.putText(image, "Centroid" + str(curcent[0]) + "," + str(curcent[1]),
#(int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.putText(image, "ID:" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),
cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
if oldcent: # checks if old centroid exists
# adds radius box from previous centroid to current centroid for visualization
xstart = oldcent[0] - maxrad
ystart = oldcent[1] - maxrad
xwidth = oldcent[0] + maxrad
yheight = oldcent[1] + maxrad
#cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)
framenumber = framenumber + 1
cv2.imshow('Intersection Flow Prediction',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
df.to_csv('grounddata1.csv', sep=',') | #arguments to start with
ap.add_argument("-b", "--buffer", type=int, default=5000,
help="max buffer size")
args = vars(ap.parse_args())
| random_line_split |
cartracker.py | import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
import imutils
from collections import deque
import argparse
import pandas as pd
import random
cap = cv2.VideoCapture('cardi.MP4')
ap = argparse.ArgumentParser()
#arguments to start with
ap.add_argument("-b", "--buffer", type=int, default=5000,
help="max buffer size")
args = vars(ap.parse_args())
# create background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2()
# where the centroids will be stored
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
#setting variables before the image processing
frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = int(width)
height = int(height)
print(frames_count, fps, width, height)
# creates a pandas data frame with the number of rows the same length as frame count
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frames"
framenumber = 0 # keeps track of current frame
carids = [] # blank list to add car ids
totalcars = 0 # keeps track of total cars
#capturing data
while(True):
# Capture two frames
ret, frame1 = cap.read() # first image
time.sleep(1/25) # slight delay
ret, frame2 = cap.read() # second image
image = cv2.resize(frame1, (0, 0), None, 1,1)
#getting the difference as the basis for movement
diff = cv2.absdiff(frame1,frame2)
mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
th =25
imask = mask > th
canvas = np.zeros_like(frame2, np.uint8)
canvas[imask] = frame1[imask]
mask = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
#canvas = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# transforms
fgmask = fgbg.apply(mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30,30))
#dilation = cv2.dilate(fgmask, kernel)
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_ERODE, kernel)
mask =closing
# variable for contours
ret,thresh = cv2.threshold(mask,0,255,0)
# creates contours/blobs
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
# draw contours
cv2.drawContours(mask, hull, -1, (0, 255, 0), 2)
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
# line created to stop counting contours, needed as cars in distance become one big contour
lineypos = 400
cv2.line(image, (-100, lineypos), (width, -120), (255, 0, 0), 3) # blue
lineypos2 = -700
cv2.line(image, (-150, lineypos2), (width, 700), (0, 255, 0), 3) # green
cv2.line(image, (-150, -100), (width, 1800), (255, 255,0), 3)
#creating centroids and boxes
for j in range(len(contours)):
if hierarchy[0, j, 3] == -1:
cnt=contours[j]
area = cv2.contourArea(cnt)
if 500 < area < 50000:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
#getting variables for the centroids
cx = int(x + w/2)
cy = int(y + h/2)
cen = (cx,cy)
cv2.circle(image, (cx,cy), 7, (255,0,0), -1)
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.5, (0, 0, 255), 1)
cxx[j] = cx
cyy[j] = cy
pts.appendleft(cen)
#this is for plotting the past centroid positions
for i in np.arange(1, len(pts)):
# if either of the tracked points are None, ignore
# them
|
#drawing hte current centroid
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
minx_index2 = []
miny_index2 = []
maxrad = 30
# if there are centroids in the specified area
if len(cxx):
if not carids: # if carids is empty
# loops through all centroids
for i in range(len(cxx)):
carids.append(i)
df[str(carids[i])] = ""
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1
else:
dx = np.zeros((len(cxx), len(carids)))
dy = np.zeros((len(cyy), len(carids)))
for i in range(len(cxx)):
for j in range(len(carids)):
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
# if centroid is not in the minindex list then another car needs to be added
if i not in minx_index2 and miny_index2:
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# checks if current centroid exists but previous centroid does not
# new car to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
# The section below labels the centroids on screen
currentcars = 0 # current cars on screen
currentcarsindex = [] # current cars on screen carid index
for i in range(len(carids)): # loops through all carids
if df.at[int(framenumber), str(carids[i])] != '':
# checks the current frame to see which car ids are active
# by checking in centroid exists on current frame for certain car id
currentcars = currentcars + 1 # adds another to current cars on screen
currentcarsindex.append(i) # adds car ids to current cars on screen
for i in range(currentcars): # loops through all current car ids on screen
# grabs centroid of certain carid for current frame
curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]
# grabs centroid of certain carid for previous frame
oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]
if curcent: # if there is a current centroid
# On-screen text for current centroid
#cv2.putText(image, "Centroid" + str(curcent[0]) + "," + str(curcent[1]),
#(int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.putText(image, "ID:" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),
cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
if oldcent: # checks if old centroid exists
# adds radius box from previous centroid to current centroid for visualization
xstart = oldcent[0] - maxrad
ystart = oldcent[1] - maxrad
xwidth = oldcent[0] + maxrad
yheight = oldcent[1] + maxrad
#cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)
framenumber = framenumber + 1
cv2.imshow('Intersection Flow Prediction',image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
df.to_csv('grounddata1.csv', sep=',')
| if pts[i - 1] is None or pts[i] is None:
continue
# draw the centroid tracker
cv2.circle(image, (pts[i - 1]), 2, (0,0,255), -1) | conditional_block |
util.go | package cloudformation
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/lambda"
gocf "github.com/crewjam/go-cloudformation"
sparta "github.com/mweagle/Sparta"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"text/template"
)
var cloudFormationStackTemplateMap map[string]*gocf.Template
func init() {
cloudFormationStackTemplateMap = make(map[string]*gocf.Template, 0)
}
////////////////////////////////////////////////////////////////////////////////
// Private
////////////////////////////////////////////////////////////////////////////////
func toExpressionSlice(input interface{}) ([]string, error) {
var expressions []string
slice, sliceOK := input.([]interface{})
if !sliceOK {
return nil, fmt.Errorf("Failed to convert to slice")
}
for _, eachValue := range slice {
switch str := eachValue.(type) {
case string:
expressions = append(expressions, str)
}
}
return expressions, nil
}
func parseFnJoinExpr(data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template
session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr
}
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr |
// TODO - fetch the template and look up the resources
existingVersions, existingVersionsErr := existingLambdaResourceVersions(serviceName,
lambdaResourceName,
session,
logger)
if nil != existingVersionsErr {
return nil, existingVersionsErr
}
// Initialize the auto incrementing version struct
autoIncrementingLambdaVersionInfo := AutoIncrementingLambdaVersionInfo{
CurrentVersion: 0,
CurrentVersionResourceName: "",
VersionHistory: make(map[int]string, 0),
}
lambdaVersionResourceName := func(versionIndex int) string {
return sparta.CloudFormationResourceName(lambdaResourceName,
"version",
strconv.Itoa(versionIndex))
}
if nil != existingVersions {
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"VersionCount": len(existingVersions.Versions) - 1, // Ignore $LATEST
"ResourceName": lambdaResourceName,
}).Info("Total number of published versions")
for _, eachEntry := range existingVersions.Versions {
versionIndex, versionIndexErr := strconv.Atoi(*eachEntry.Version)
if nil == versionIndexErr {
// Find the existing resource...
versionResourceName := lambdaVersionResourceName(versionIndex)
if nil == existingStackDefinition {
return nil, fmt.Errorf("Unable to find exising Version resource in nil Template")
}
cfResourceDefinition, cfResourceDefinitionExists := existingStackDefinition.Resources[versionResourceName]
if !cfResourceDefinitionExists {
return nil, fmt.Errorf("Unable to find exising Version resource (Resource: %s, Version: %d) in template",
versionResourceName,
versionIndex)
}
cfTemplate.Resources[versionResourceName] = cfResourceDefinition
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"Version": versionIndex,
"ResourceName": versionResourceName,
}).Debug("Preserving Lambda version")
// Store the state, tracking the latest version
autoIncrementingLambdaVersionInfo.VersionHistory[versionIndex] = versionResourceName
if versionIndex > autoIncrementingLambdaVersionInfo.CurrentVersion {
autoIncrementingLambdaVersionInfo.CurrentVersion = versionIndex
}
}
}
}
// Bump the version and add a new entry...
autoIncrementingLambdaVersionInfo.CurrentVersion++
versionResource := &gocf.LambdaVersion{
FunctionName: gocf.GetAtt(lambdaResourceName, "Arn").String(),
}
autoIncrementingLambdaVersionInfo.CurrentVersionResourceName = lambdaVersionResourceName(autoIncrementingLambdaVersionInfo.CurrentVersion)
cfTemplate.AddResource(autoIncrementingLambdaVersionInfo.CurrentVersionResourceName, versionResource)
// Log the version we're about to publish...
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
"StackVersion": autoIncrementingLambdaVersionInfo.CurrentVersion,
}).Info("Inserting new version resource")
return &autoIncrementingLambdaVersionInfo, nil
}
| {
return nil, existingStackDefinitionErr
} | conditional_block |
util.go | package cloudformation
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/lambda"
gocf "github.com/crewjam/go-cloudformation"
sparta "github.com/mweagle/Sparta"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"text/template"
)
var cloudFormationStackTemplateMap map[string]*gocf.Template
func init() {
cloudFormationStackTemplateMap = make(map[string]*gocf.Template, 0)
}
////////////////////////////////////////////////////////////////////////////////
// Private
////////////////////////////////////////////////////////////////////////////////
func toExpressionSlice(input interface{}) ([]string, error) {
var expressions []string
slice, sliceOK := input.([]interface{})
if !sliceOK {
return nil, fmt.Errorf("Failed to convert to slice")
}
for _, eachValue := range slice {
switch str := eachValue.(type) {
case string:
expressions = append(expressions, str)
}
}
return expressions, nil
}
func | (data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template
session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr
}
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr {
return nil, existingStackDefinitionErr
}
// TODO - fetch the template and look up the resources
existingVersions, existingVersionsErr := existingLambdaResourceVersions(serviceName,
lambdaResourceName,
session,
logger)
if nil != existingVersionsErr {
return nil, existingVersionsErr
}
// Initialize the auto incrementing version struct
autoIncrementingLambdaVersionInfo := AutoIncrementingLambdaVersionInfo{
CurrentVersion: 0,
CurrentVersionResourceName: "",
VersionHistory: make(map[int]string, 0),
}
lambdaVersionResourceName := func(versionIndex int) string {
return sparta.CloudFormationResourceName(lambdaResourceName,
"version",
strconv.Itoa(versionIndex))
}
if nil != existingVersions {
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"VersionCount": len(existingVersions.Versions) - 1, // Ignore $LATEST
"ResourceName": lambdaResourceName,
}).Info("Total number of published versions")
for _, eachEntry := range existingVersions.Versions {
versionIndex, versionIndexErr := strconv.Atoi(*eachEntry.Version)
if nil == versionIndexErr {
// Find the existing resource...
versionResourceName := lambdaVersionResourceName(versionIndex)
if nil == existingStackDefinition {
return nil, fmt.Errorf("Unable to find exising Version resource in nil Template")
}
cfResourceDefinition, cfResourceDefinitionExists := existingStackDefinition.Resources[versionResourceName]
if !cfResourceDefinitionExists {
return nil, fmt.Errorf("Unable to find exising Version resource (Resource: %s, Version: %d) in template",
versionResourceName,
versionIndex)
}
cfTemplate.Resources[versionResourceName] = cfResourceDefinition
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"Version": versionIndex,
"ResourceName": versionResourceName,
}).Debug("Preserving Lambda version")
// Store the state, tracking the latest version
autoIncrementingLambdaVersionInfo.VersionHistory[versionIndex] = versionResourceName
if versionIndex > autoIncrementingLambdaVersionInfo.CurrentVersion {
autoIncrementingLambdaVersionInfo.CurrentVersion = versionIndex
}
}
}
}
// Bump the version and add a new entry...
autoIncrementingLambdaVersionInfo.CurrentVersion++
versionResource := &gocf.LambdaVersion{
FunctionName: gocf.GetAtt(lambdaResourceName, "Arn").String(),
}
autoIncrementingLambdaVersionInfo.CurrentVersionResourceName = lambdaVersionResourceName(autoIncrementingLambdaVersionInfo.CurrentVersion)
cfTemplate.AddResource(autoIncrementingLambdaVersionInfo.CurrentVersionResourceName, versionResource)
// Log the version we're about to publish...
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
"StackVersion": autoIncrementingLambdaVersionInfo.CurrentVersion,
}).Info("Inserting new version resource")
return &autoIncrementingLambdaVersionInfo, nil
}
| parseFnJoinExpr | identifier_name |
util.go | package cloudformation
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/lambda"
gocf "github.com/crewjam/go-cloudformation"
sparta "github.com/mweagle/Sparta"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"text/template"
)
var cloudFormationStackTemplateMap map[string]*gocf.Template
func init() {
cloudFormationStackTemplateMap = make(map[string]*gocf.Template, 0)
}
////////////////////////////////////////////////////////////////////////////////
// Private
////////////////////////////////////////////////////////////////////////////////
func toExpressionSlice(input interface{}) ([]string, error) {
var expressions []string
slice, sliceOK := input.([]interface{})
if !sliceOK {
return nil, fmt.Errorf("Failed to convert to slice")
}
for _, eachValue := range slice {
switch str := eachValue.(type) {
case string:
expressions = append(expressions, str)
}
}
return expressions, nil
}
func parseFnJoinExpr(data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr |
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template
session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr
}
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr {
return nil, existingStackDefinitionErr
}
// TODO - fetch the template and look up the resources
existingVersions, existingVersionsErr := existingLambdaResourceVersions(serviceName,
lambdaResourceName,
session,
logger)
if nil != existingVersionsErr {
return nil, existingVersionsErr
}
// Initialize the auto incrementing version struct
autoIncrementingLambdaVersionInfo := AutoIncrementingLambdaVersionInfo{
CurrentVersion: 0,
CurrentVersionResourceName: "",
VersionHistory: make(map[int]string, 0),
}
lambdaVersionResourceName := func(versionIndex int) string {
return sparta.CloudFormationResourceName(lambdaResourceName,
"version",
strconv.Itoa(versionIndex))
}
if nil != existingVersions {
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"VersionCount": len(existingVersions.Versions) - 1, // Ignore $LATEST
"ResourceName": lambdaResourceName,
}).Info("Total number of published versions")
for _, eachEntry := range existingVersions.Versions {
versionIndex, versionIndexErr := strconv.Atoi(*eachEntry.Version)
if nil == versionIndexErr {
// Find the existing resource...
versionResourceName := lambdaVersionResourceName(versionIndex)
if nil == existingStackDefinition {
return nil, fmt.Errorf("Unable to find exising Version resource in nil Template")
}
cfResourceDefinition, cfResourceDefinitionExists := existingStackDefinition.Resources[versionResourceName]
if !cfResourceDefinitionExists {
return nil, fmt.Errorf("Unable to find exising Version resource (Resource: %s, Version: %d) in template",
versionResourceName,
versionIndex)
}
cfTemplate.Resources[versionResourceName] = cfResourceDefinition
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"Version": versionIndex,
"ResourceName": versionResourceName,
}).Debug("Preserving Lambda version")
// Store the state, tracking the latest version
autoIncrementingLambdaVersionInfo.VersionHistory[versionIndex] = versionResourceName
if versionIndex > autoIncrementingLambdaVersionInfo.CurrentVersion {
autoIncrementingLambdaVersionInfo.CurrentVersion = versionIndex
}
}
}
}
// Bump the version and add a new entry...
autoIncrementingLambdaVersionInfo.CurrentVersion++
versionResource := &gocf.LambdaVersion{
FunctionName: gocf.GetAtt(lambdaResourceName, "Arn").String(),
}
autoIncrementingLambdaVersionInfo.CurrentVersionResourceName = lambdaVersionResourceName(autoIncrementingLambdaVersionInfo.CurrentVersion)
cfTemplate.AddResource(autoIncrementingLambdaVersionInfo.CurrentVersionResourceName, versionResource)
// Log the version we're about to publish...
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
"StackVersion": autoIncrementingLambdaVersionInfo.CurrentVersion,
}).Info("Inserting new version resource")
return &autoIncrementingLambdaVersionInfo, nil
}
| {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
} | identifier_body |
util.go | package cloudformation
import (
"bytes"
"encoding/json"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/aws/aws-sdk-go/service/lambda"
gocf "github.com/crewjam/go-cloudformation"
sparta "github.com/mweagle/Sparta"
"io"
"io/ioutil"
"regexp"
"strconv"
"strings"
"text/template"
)
var cloudFormationStackTemplateMap map[string]*gocf.Template
func init() {
cloudFormationStackTemplateMap = make(map[string]*gocf.Template, 0)
}
////////////////////////////////////////////////////////////////////////////////
// Private
////////////////////////////////////////////////////////////////////////////////
func toExpressionSlice(input interface{}) ([]string, error) {
var expressions []string
slice, sliceOK := input.([]interface{})
if !sliceOK {
return nil, fmt.Errorf("Failed to convert to slice")
}
for _, eachValue := range slice {
switch str := eachValue.(type) {
case string:
expressions = append(expressions, str)
}
}
return expressions, nil
}
func parseFnJoinExpr(data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template | }
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr {
return nil, existingStackDefinitionErr
}
// TODO - fetch the template and look up the resources
existingVersions, existingVersionsErr := existingLambdaResourceVersions(serviceName,
lambdaResourceName,
session,
logger)
if nil != existingVersionsErr {
return nil, existingVersionsErr
}
// Initialize the auto incrementing version struct
autoIncrementingLambdaVersionInfo := AutoIncrementingLambdaVersionInfo{
CurrentVersion: 0,
CurrentVersionResourceName: "",
VersionHistory: make(map[int]string, 0),
}
lambdaVersionResourceName := func(versionIndex int) string {
return sparta.CloudFormationResourceName(lambdaResourceName,
"version",
strconv.Itoa(versionIndex))
}
if nil != existingVersions {
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"VersionCount": len(existingVersions.Versions) - 1, // Ignore $LATEST
"ResourceName": lambdaResourceName,
}).Info("Total number of published versions")
for _, eachEntry := range existingVersions.Versions {
versionIndex, versionIndexErr := strconv.Atoi(*eachEntry.Version)
if nil == versionIndexErr {
// Find the existing resource...
versionResourceName := lambdaVersionResourceName(versionIndex)
if nil == existingStackDefinition {
return nil, fmt.Errorf("Unable to find exising Version resource in nil Template")
}
cfResourceDefinition, cfResourceDefinitionExists := existingStackDefinition.Resources[versionResourceName]
if !cfResourceDefinitionExists {
return nil, fmt.Errorf("Unable to find exising Version resource (Resource: %s, Version: %d) in template",
versionResourceName,
versionIndex)
}
cfTemplate.Resources[versionResourceName] = cfResourceDefinition
// Add the CloudFormation resource
logger.WithFields(logrus.Fields{
"Version": versionIndex,
"ResourceName": versionResourceName,
}).Debug("Preserving Lambda version")
// Store the state, tracking the latest version
autoIncrementingLambdaVersionInfo.VersionHistory[versionIndex] = versionResourceName
if versionIndex > autoIncrementingLambdaVersionInfo.CurrentVersion {
autoIncrementingLambdaVersionInfo.CurrentVersion = versionIndex
}
}
}
}
// Bump the version and add a new entry...
autoIncrementingLambdaVersionInfo.CurrentVersion++
versionResource := &gocf.LambdaVersion{
FunctionName: gocf.GetAtt(lambdaResourceName, "Arn").String(),
}
autoIncrementingLambdaVersionInfo.CurrentVersionResourceName = lambdaVersionResourceName(autoIncrementingLambdaVersionInfo.CurrentVersion)
cfTemplate.AddResource(autoIncrementingLambdaVersionInfo.CurrentVersionResourceName, versionResource)
// Log the version we're about to publish...
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
"StackVersion": autoIncrementingLambdaVersionInfo.CurrentVersion,
}).Info("Inserting new version resource")
return &autoIncrementingLambdaVersionInfo, nil
} | session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr | random_line_split |
gravity.js | // Gravity.js
// Responsive framework for intuitive and easy web design
/*
The semi-colon before the function invocation is a safety net against
concatenated scripts and/or other plugins which may not be closed properly.
"undefined" is used because the undefined global variable in ECMAScript 3
is mutable (ie. it can be changed by someone else). Because we don't pass a
value to undefined when the anonymyous function is invoked, we ensure that
undefined is truly undefined. Note, in ECMAScript 5 undefined can no
longer be modified.
"window" and "document" are passed as local variables rather than global.
This (slightly) quickens the resolution process.
*/
;(function ( $, window, document, undefined ) {
/*
Store the name of the plugin in the "pluginName" variable. This
variable is used in the "Plugin" constructor below, as well as the
plugin wrapper to construct the key for the "$.data" method.
More: http://api.jquery.com/jquery.data/
*/
var pluginName = 'gravity';
/*
The "Plugin" constructor, builds a new instance of the plugin for the
DOM node(s) that the plugin is called on. For example,
"$('h1').pluginName();" creates a new instance of pluginName for
all h1's.
*/
// Create the plugin constructor
function | ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
});
},
// Unbind events that trigger methods
unbindEvents: function() {
/*
Unbind all events in our plugin's namespace that are attached
to "this.$element".
*/
this.$element.off('.'+this._name);
},
/*
"someOtherFunction" is an example of a custom method in your
plugin. Each method should perform a specific task. For example,
the buildCache method exists only to create variables for other
methods to access. The bindEvents method exists only to bind events
to event handlers that trigger other methods. Creating custom
plugin methods this way is less confusing (separation of concerns)
and makes your code easier to test.
*/
// Create custom methods
someOtherFunction: function() {
alert('I promise to do something cool!');
this.callback();
},
callback: function() {
// Cache onComplete option
var onComplete = this.options.onComplete;
if ( typeof onComplete === 'function' ) {
/*
Use the "call" method so that inside of the onComplete
callback function the "this" keyword refers to the
specific DOM node that called the plugin.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
onComplete.call(this.element);
}
}
});
/*
Create a lightweight plugin wrapper around the "Plugin" constructor,
preventing against multiple instantiations.
More: http://learn.jquery.com/plugins/basic-plugin-creation/
*/
$.fn.gravity = function ( options ) {
this.each(function() {
if ( !$.data( this, "plugin_" + pluginName ) ) {
/*
Use "$.data" to save each instance of the plugin in case
the user wants to modify it. Using "$.data" in this way
ensures the data is removed when the DOM element(s) are
removed via jQuery methods, as well as when the userleaves
the page. It's a smart way to prevent memory leaks.
More: http://api.jquery.com/jquery.data/
*/
$.data( this, "plugin_" + pluginName, new Plugin( this, options ) );
}
});
/*
"return this;" returns the original jQuery object. This allows
additional jQuery methods to be chained.
*/
return this;
};
/*
Attach the default plugin options directly to the plugin object. This
allows users to override default plugin options globally, instead of
passing the same option(s) every time the plugin is initialized.
For example, the user could set the "property" value once for all
instances of the plugin with
"$.fn.pluginName.defaults.property = 'myValue';". Then, every time
plugin is initialized, "property" will be set to "myValue".
More: http://learn.jquery.com/plugins/advanced-plugin-concepts/
*/
$.fn.gravity.defaults = {
gravitation: [{ name: 'g1', top: '50%', left: '50%' }],
k: 0.618,
density: 10,
elements: [],
onComplete: null
};
if($('.gravity').length){
$('body').gravity();
}
})( jQuery, window, document );
| Plugin | identifier_name |
gravity.js | // Gravity.js
// Responsive framework for intuitive and easy web design
/*
The semi-colon before the function invocation is a safety net against
concatenated scripts and/or other plugins which may not be closed properly.
"undefined" is used because the undefined global variable in ECMAScript 3
is mutable (ie. it can be changed by someone else). Because we don't pass a
value to undefined when the anonymyous function is invoked, we ensure that
undefined is truly undefined. Note, in ECMAScript 5 undefined can no
longer be modified.
"window" and "document" are passed as local variables rather than global.
This (slightly) quickens the resolution process.
*/
;(function ( $, window, document, undefined ) {
/*
Store the name of the plugin in the "pluginName" variable. This
variable is used in the "Plugin" constructor below, as well as the
plugin wrapper to construct the key for the "$.data" method.
More: http://api.jquery.com/jquery.data/
*/
var pluginName = 'gravity';
/*
The "Plugin" constructor, builds a new instance of the plugin for the
DOM node(s) that the plugin is called on. For example,
"$('h1').pluginName();" creates a new instance of pluginName for
all h1's.
*/
// Create the plugin constructor
function Plugin ( element, options ) |
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
});
},
// Unbind events that trigger methods
unbindEvents: function() {
/*
Unbind all events in our plugin's namespace that are attached
to "this.$element".
*/
this.$element.off('.'+this._name);
},
/*
"someOtherFunction" is an example of a custom method in your
plugin. Each method should perform a specific task. For example,
the buildCache method exists only to create variables for other
methods to access. The bindEvents method exists only to bind events
to event handlers that trigger other methods. Creating custom
plugin methods this way is less confusing (separation of concerns)
and makes your code easier to test.
*/
// Create custom methods
someOtherFunction: function() {
alert('I promise to do something cool!');
this.callback();
},
callback: function() {
// Cache onComplete option
var onComplete = this.options.onComplete;
if ( typeof onComplete === 'function' ) {
/*
Use the "call" method so that inside of the onComplete
callback function the "this" keyword refers to the
specific DOM node that called the plugin.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
onComplete.call(this.element);
}
}
});
/*
Create a lightweight plugin wrapper around the "Plugin" constructor,
preventing against multiple instantiations.
More: http://learn.jquery.com/plugins/basic-plugin-creation/
*/
$.fn.gravity = function ( options ) {
this.each(function() {
if ( !$.data( this, "plugin_" + pluginName ) ) {
/*
Use "$.data" to save each instance of the plugin in case
the user wants to modify it. Using "$.data" in this way
ensures the data is removed when the DOM element(s) are
removed via jQuery methods, as well as when the userleaves
the page. It's a smart way to prevent memory leaks.
More: http://api.jquery.com/jquery.data/
*/
$.data( this, "plugin_" + pluginName, new Plugin( this, options ) );
}
});
/*
"return this;" returns the original jQuery object. This allows
additional jQuery methods to be chained.
*/
return this;
};
/*
Attach the default plugin options directly to the plugin object. This
allows users to override default plugin options globally, instead of
passing the same option(s) every time the plugin is initialized.
For example, the user could set the "property" value once for all
instances of the plugin with
"$.fn.pluginName.defaults.property = 'myValue';". Then, every time
plugin is initialized, "property" will be set to "myValue".
More: http://learn.jquery.com/plugins/advanced-plugin-concepts/
*/
$.fn.gravity.defaults = {
gravitation: [{ name: 'g1', top: '50%', left: '50%' }],
k: 0.618,
density: 10,
elements: [],
onComplete: null
};
if($('.gravity').length){
$('body').gravity();
}
})( jQuery, window, document );
| {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
} | identifier_body |
gravity.js | // Gravity.js
// Responsive framework for intuitive and easy web design
/*
The semi-colon before the function invocation is a safety net against
concatenated scripts and/or other plugins which may not be closed properly.
"undefined" is used because the undefined global variable in ECMAScript 3
is mutable (ie. it can be changed by someone else). Because we don't pass a
value to undefined when the anonymyous function is invoked, we ensure that
undefined is truly undefined. Note, in ECMAScript 5 undefined can no
longer be modified.
"window" and "document" are passed as local variables rather than global.
This (slightly) quickens the resolution process.
*/
;(function ( $, window, document, undefined ) {
/*
Store the name of the plugin in the "pluginName" variable. This
variable is used in the "Plugin" constructor below, as well as the
plugin wrapper to construct the key for the "$.data" method.
More: http://api.jquery.com/jquery.data/
*/
var pluginName = 'gravity';
/*
The "Plugin" constructor, builds a new instance of the plugin for the
DOM node(s) that the plugin is called on. For example,
"$('h1').pluginName();" creates a new instance of pluginName for
all h1's.
*/
// Create the plugin constructor
function Plugin ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top) | else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
});
},
// Unbind events that trigger methods
unbindEvents: function() {
/*
Unbind all events in our plugin's namespace that are attached
to "this.$element".
*/
this.$element.off('.'+this._name);
},
/*
"someOtherFunction" is an example of a custom method in your
plugin. Each method should perform a specific task. For example,
the buildCache method exists only to create variables for other
methods to access. The bindEvents method exists only to bind events
to event handlers that trigger other methods. Creating custom
plugin methods this way is less confusing (separation of concerns)
and makes your code easier to test.
*/
// Create custom methods
someOtherFunction: function() {
alert('I promise to do something cool!');
this.callback();
},
callback: function() {
// Cache onComplete option
var onComplete = this.options.onComplete;
if ( typeof onComplete === 'function' ) {
/*
Use the "call" method so that inside of the onComplete
callback function the "this" keyword refers to the
specific DOM node that called the plugin.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
onComplete.call(this.element);
}
}
});
/*
Create a lightweight plugin wrapper around the "Plugin" constructor,
preventing against multiple instantiations.
More: http://learn.jquery.com/plugins/basic-plugin-creation/
*/
$.fn.gravity = function ( options ) {
this.each(function() {
if ( !$.data( this, "plugin_" + pluginName ) ) {
/*
Use "$.data" to save each instance of the plugin in case
the user wants to modify it. Using "$.data" in this way
ensures the data is removed when the DOM element(s) are
removed via jQuery methods, as well as when the userleaves
the page. It's a smart way to prevent memory leaks.
More: http://api.jquery.com/jquery.data/
*/
$.data( this, "plugin_" + pluginName, new Plugin( this, options ) );
}
});
/*
"return this;" returns the original jQuery object. This allows
additional jQuery methods to be chained.
*/
return this;
};
/*
Attach the default plugin options directly to the plugin object. This
allows users to override default plugin options globally, instead of
passing the same option(s) every time the plugin is initialized.
For example, the user could set the "property" value once for all
instances of the plugin with
"$.fn.pluginName.defaults.property = 'myValue';". Then, every time
plugin is initialized, "property" will be set to "myValue".
More: http://learn.jquery.com/plugins/advanced-plugin-concepts/
*/
$.fn.gravity.defaults = {
gravitation: [{ name: 'g1', top: '50%', left: '50%' }],
k: 0.618,
density: 10,
elements: [],
onComplete: null
};
if($('.gravity').length){
$('body').gravity();
}
})( jQuery, window, document );
| {
group.data.padding = {
top: delta.top - group.data.offset.top
};
} | conditional_block |
gravity.js | // Gravity.js
// Responsive framework for intuitive and easy web design
/*
The semi-colon before the function invocation is a safety net against
concatenated scripts and/or other plugins which may not be closed properly.
"undefined" is used because the undefined global variable in ECMAScript 3
is mutable (ie. it can be changed by someone else). Because we don't pass a
value to undefined when the anonymyous function is invoked, we ensure that
undefined is truly undefined. Note, in ECMAScript 5 undefined can no
longer be modified.
"window" and "document" are passed as local variables rather than global.
This (slightly) quickens the resolution process.
*/
;(function ( $, window, document, undefined ) {
/*
Store the name of the plugin in the "pluginName" variable. This
variable is used in the "Plugin" constructor below, as well as the
plugin wrapper to construct the key for the "$.data" method.
More: http://api.jquery.com/jquery.data/
*/
var pluginName = 'gravity';
/*
The "Plugin" constructor, builds a new instance of the plugin for the
DOM node(s) that the plugin is called on. For example,
"$('h1').pluginName();" creates a new instance of pluginName for
all h1's.
*/
// Create the plugin constructor
function Plugin ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is | instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
});
},
// Unbind events that trigger methods
unbindEvents: function() {
/*
Unbind all events in our plugin's namespace that are attached
to "this.$element".
*/
this.$element.off('.'+this._name);
},
/*
"someOtherFunction" is an example of a custom method in your
plugin. Each method should perform a specific task. For example,
the buildCache method exists only to create variables for other
methods to access. The bindEvents method exists only to bind events
to event handlers that trigger other methods. Creating custom
plugin methods this way is less confusing (separation of concerns)
and makes your code easier to test.
*/
// Create custom methods
someOtherFunction: function() {
alert('I promise to do something cool!');
this.callback();
},
callback: function() {
// Cache onComplete option
var onComplete = this.options.onComplete;
if ( typeof onComplete === 'function' ) {
/*
Use the "call" method so that inside of the onComplete
callback function the "this" keyword refers to the
specific DOM node that called the plugin.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
onComplete.call(this.element);
}
}
});
/*
Create a lightweight plugin wrapper around the "Plugin" constructor,
preventing against multiple instantiations.
More: http://learn.jquery.com/plugins/basic-plugin-creation/
*/
$.fn.gravity = function ( options ) {
this.each(function() {
if ( !$.data( this, "plugin_" + pluginName ) ) {
/*
Use "$.data" to save each instance of the plugin in case
the user wants to modify it. Using "$.data" in this way
ensures the data is removed when the DOM element(s) are
removed via jQuery methods, as well as when the userleaves
the page. It's a smart way to prevent memory leaks.
More: http://api.jquery.com/jquery.data/
*/
$.data( this, "plugin_" + pluginName, new Plugin( this, options ) );
}
});
/*
"return this;" returns the original jQuery object. This allows
additional jQuery methods to be chained.
*/
return this;
};
/*
Attach the default plugin options directly to the plugin object. This
allows users to override default plugin options globally, instead of
passing the same option(s) every time the plugin is initialized.
For example, the user could set the "property" value once for all
instances of the plugin with
"$.fn.pluginName.defaults.property = 'myValue';". Then, every time
plugin is initialized, "property" will be set to "myValue".
More: http://learn.jquery.com/plugins/advanced-plugin-concepts/
*/
$.fn.gravity.defaults = {
gravitation: [{ name: 'g1', top: '50%', left: '50%' }],
k: 0.618,
density: 10,
elements: [],
onComplete: null
};
if($('.gravity').length){
$('body').gravity();
}
})( jQuery, window, document ); | empty so that we don't alter the default options for future | random_line_split |
lsfiles.go | package git
import (
"fmt"
"io/ioutil"
"log"
"path"
"path/filepath"
"sort"
"strings"
)
// Finds things that aren't tracked, and creates fake IndexEntrys for them to be merged into
// the output if --others is passed.
func findUntrackedFilesFromDir(c *Client, opts LsFilesOptions, root, parent, dir File, tracked map[IndexPath]bool, recursedir bool, ignorePatterns []IgnorePattern) (untracked []*IndexEntry) {
files, err := ioutil.ReadDir(dir.String())
if err != nil {
return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
}
}
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip |
}
fs = append(fs, LsFilesResult{file, '?'})
}
}
sort.Sort(lsByPath(fs))
return fs, nil
}
// Implement the sort interface on *GitIndexEntry, so that
// it's easy to sort by name.
type lsByPath []LsFilesResult
func (g lsByPath) Len() int { return len(g) }
func (g lsByPath) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
func (g lsByPath) Less(i, j int) bool {
if g[i].PathName == g[j].PathName {
return g[i].Stage() < g[j].Stage()
}
ibytes := []byte(g[i].PathName)
jbytes := []byte(g[j].PathName)
for k := range ibytes {
if k >= len(jbytes) {
// We reached the end of j and there was stuff
// leftover in i, so i > j
return false
}
// If a character is not equal, return if it's
// less or greater
if ibytes[k] < jbytes[k] {
return true
} else if ibytes[k] > jbytes[k] {
return false
}
}
// Everything equal up to the end of i, and there is stuff
// left in j, so i < j
return true
}
| {
continue
} | conditional_block |
lsfiles.go | package git
import (
"fmt"
"io/ioutil"
"log"
"path"
"path/filepath"
"sort"
"strings"
)
// Finds things that aren't tracked, and creates fake IndexEntrys for them to be merged into
// the output if --others is passed.
func findUntrackedFilesFromDir(c *Client, opts LsFilesOptions, root, parent, dir File, tracked map[IndexPath]bool, recursedir bool, ignorePatterns []IgnorePattern) (untracked []*IndexEntry) {
files, err := ioutil.ReadDir(dir.String())
if err != nil {
return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
}
}
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip {
continue
}
}
fs = append(fs, LsFilesResult{file, '?'})
}
}
sort.Sort(lsByPath(fs))
return fs, nil
}
// Implement the sort interface on *GitIndexEntry, so that
// it's easy to sort by name.
type lsByPath []LsFilesResult
func (g lsByPath) | () int { return len(g) }
func (g lsByPath) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
func (g lsByPath) Less(i, j int) bool {
if g[i].PathName == g[j].PathName {
return g[i].Stage() < g[j].Stage()
}
ibytes := []byte(g[i].PathName)
jbytes := []byte(g[j].PathName)
for k := range ibytes {
if k >= len(jbytes) {
// We reached the end of j and there was stuff
// leftover in i, so i > j
return false
}
// If a character is not equal, return if it's
// less or greater
if ibytes[k] < jbytes[k] {
return true
} else if ibytes[k] > jbytes[k] {
return false
}
}
// Everything equal up to the end of i, and there is stuff
// left in j, so i < j
return true
}
| Len | identifier_name |
lsfiles.go | package git
import (
"fmt"
"io/ioutil"
"log"
"path"
"path/filepath"
"sort"
"strings"
)
// Finds things that aren't tracked, and creates fake IndexEntrys for them to be merged into
// the output if --others is passed.
func findUntrackedFilesFromDir(c *Client, opts LsFilesOptions, root, parent, dir File, tracked map[IndexPath]bool, recursedir bool, ignorePatterns []IgnorePattern) (untracked []*IndexEntry) |
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip {
continue
}
}
fs = append(fs, LsFilesResult{file, '?'})
}
}
sort.Sort(lsByPath(fs))
return fs, nil
}
// Implement the sort interface on *GitIndexEntry, so that
// it's easy to sort by name.
type lsByPath []LsFilesResult
func (g lsByPath) Len() int { return len(g) }
func (g lsByPath) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
func (g lsByPath) Less(i, j int) bool {
if g[i].PathName == g[j].PathName {
return g[i].Stage() < g[j].Stage()
}
ibytes := []byte(g[i].PathName)
jbytes := []byte(g[j].PathName)
for k := range ibytes {
if k >= len(jbytes) {
// We reached the end of j and there was stuff
// leftover in i, so i > j
return false
}
// If a character is not equal, return if it's
// less or greater
if ibytes[k] < jbytes[k] {
return true
} else if ibytes[k] > jbytes[k] {
return false
}
}
// Everything equal up to the end of i, and there is stuff
// left in j, so i < j
return true
}
| {
files, err := ioutil.ReadDir(dir.String())
if err != nil {
return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
}
}
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
} | identifier_body |
lsfiles.go | package git
import (
"fmt"
"io/ioutil"
"log"
"path"
"path/filepath"
"sort"
"strings"
)
// Finds things that aren't tracked, and creates fake IndexEntrys for them to be merged into
// the output if --others is passed.
func findUntrackedFilesFromDir(c *Client, opts LsFilesOptions, root, parent, dir File, tracked map[IndexPath]bool, recursedir bool, ignorePatterns []IgnorePattern) (untracked []*IndexEntry) { | return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
}
}
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip {
continue
}
}
fs = append(fs, LsFilesResult{file, '?'})
}
}
sort.Sort(lsByPath(fs))
return fs, nil
}
// Implement the sort interface on *GitIndexEntry, so that
// it's easy to sort by name.
type lsByPath []LsFilesResult
func (g lsByPath) Len() int { return len(g) }
func (g lsByPath) Swap(i, j int) { g[i], g[j] = g[j], g[i] }
func (g lsByPath) Less(i, j int) bool {
if g[i].PathName == g[j].PathName {
return g[i].Stage() < g[j].Stage()
}
ibytes := []byte(g[i].PathName)
jbytes := []byte(g[j].PathName)
for k := range ibytes {
if k >= len(jbytes) {
// We reached the end of j and there was stuff
// leftover in i, so i > j
return false
}
// If a character is not equal, return if it's
// less or greater
if ibytes[k] < jbytes[k] {
return true
} else if ibytes[k] > jbytes[k] {
return false
}
}
// Everything equal up to the end of i, and there is stuff
// left in j, so i < j
return true
} | files, err := ioutil.ReadDir(dir.String())
if err != nil { | random_line_split |
plot_all_sampled_days.py | # with centroid and angle displayed
import mpl_toolkits.basemap as bm
from mpl_toolkits.basemap import Basemap, cm
from netCDF4 import Dataset as NetCDFFile
import numpy as np
import matplotlib.pyplot as plt
import sys,os
cwd=os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd+'/../../MetBot')
sys.path.append(cwd+'/../../RTools')
sys.path.append(cwd+'/../')
import PlotTools as pt
import MetBot.dset_dict as dsetdict
import dsets_paper_28_4plot as dset_mp
import MetBot.dimensions_dict as dim_exdict
import MetBot.mytools as my
import MetBot.mynetcdf as mync
import MetBot.SynopticAnatomy as sy
import MetBot.MetBlobs as blb
import time as tm
import datetime
### Running options
size='20'
globv='olr'
postrmm=False
sub='SA'
from_event='first'
sample='blon'
type='cont'
if type == 'cont':
jj = 0
if sample == 'blon':
best_lon = [33,58]
ndays=[50,50]
n_cen = [-22,-22]
s_cen = [-32,-32]
t_ang = [-60,-50]
b_ang = [-25,-15]
f_seas = [11,11]
l_seas = [3,3]
# How many plots do you want?
if size=='20':
nplot=int(size)
xplots=4
yplots=5
### Get directories
bkdir=cwd+"/../../../CTdata/"
botdir=bkdir+"metbot_multi_dset/"
thisdir=bkdir+"hpaperplot/plot_all_sampled_days"
my.mkdir_p(thisdir)
### Multi dset?
dsets='spec' # "all" or "spec" to choose specific dset(s)
if dsets=='all':
ndset=len(dset_mp.dset_deets)
dsetnames=list(dset_mp.dset_deets)
elif dsets=='spec': # edit for the dset you want
#ndset=1
#dsetnames=['ncep']
ndset=1
dsetnames=['cmip5']
ndstr=str(ndset)
for d in range(ndset):
dset=dsetnames[d]
dcnt=str(d+1)
print 'Running on '+dset
print 'This is dset '+dcnt+' of '+ndstr+' in list'
outdir=thisdir+'/'+dset+'/'
my.mkdir_p(outdir)
### Multi model?
mods = 'spec' # "all" or "spec" to choose specific model(s)
if mods == 'all':
nmod = len(dset_mp.dset_deets[dset])
mnames = list(dset_mp.dset_deets[dset])
if mods == 'spec': # edit for the models you want
nmod = 1
mnames = ['ACCESS1-0']
#nmod=5
#mnames=['ACCESS1-0','bcc-csm1-1-m','CanESM2','GFDL-CM3','MIROC-ESM']
nmstr = str(nmod)
for m in range(nmod):
name = mnames[m]
mcnt = str(m + 1)
print 'Running on ' + name
print 'This is model ' + mcnt + ' of ' + nmstr + ' in list'
# Get info
moddct = dsetdict.dset_deets[dset][name]
vnamedict = globv + 'name'
varstr = moddct[vnamedict]
ys = moddct['yrfname']
dimdict = dim_exdict.dim_deets[globv][dset]
latname = dimdict[1]
lonname = dimdict[2]
# Open olr file
olrfile=botdir+dset+'/'+name+'.'+globv+\
'.day.mean.'+ys+'.nc'
print 'Opening '+olrfile
ncout = mync.open_multi(olrfile, globv, name, \
dataset=dset, subs=sub)
ndim = len(ncout)
if ndim == 5:
olrdata, time, lat, lon, dtime = ncout
elif ndim == 6:
olrdata, time, lat, lon, lev, dtime = ncout
olrdata = np.squeeze(olrdata)
else:
print 'Check number of dims in ncfile'
dtime[:, 3] = 0
# Select dates with TTCBs only
threshtxt = botdir + 'thresholds.fmin.all_dset.txt'
print threshtxt
with open(threshtxt) as f:
for line in f:
if dset + '\t' + name in line:
thresh = line.split()[2]
print 'thresh=' + str(thresh)
thresh = int(thresh)
thisthresh = thresh
thre_str = str(int(thisthresh))
### Open synop file
sydir=botdir+dset+'/'+name+'/'
sysuf=sydir+name+'_'
mbsfile = sysuf + thre_str + '_' + dset + "-olr-0-0.mbs"
refmbs, refmbt, refch = blb.mbopen(mbsfile)
refmbt[:,3]=0
if from_event == 'first':
syfile = sysuf + thre_str + '_' + dset + '-OLR.synop'
s = sy.SynopticEvents((), [syfile], COL=False)
ks = s.events.keys()
ks.sort()
refkey = s.mbskeys[0]
count_all = str(int(len(ks)))
print "Total CBs flagged =" + str(count_all)
ev_dts = []
ev_keys = []
ev_cXs = []
for k in ks:
e = s.events[k]
dts = s.blobs[refkey]['mbt'][e.ixflags]
for dt in range(len(dts)):
x, y = e.trkcX[dt], e.trkcY[dt]
ev_dts.append(dts[dt])
ev_keys.append(k)
ev_cXs.append(x)
ev_dts = np.asarray(ev_dts)
ev_dts[:, 3] = 0
ev_keys = np.asarray(ev_keys)
ev_cXs = np.asarray(ev_cXs)
### Get array of centroids and angles
edts = []
cXs = []
cYs = []
degs = []
mons = []
for b in range(len(refmbt)):
date = refmbt[b]
mon = int(date[1])
cX = refmbs[b, 3]
cY = refmbs[b, 4]
deg = refmbs[b, 2]
if from_event == 'all':
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
elif from_event == 'first':
# print 'Checking if the date is the first day of an event'
ix = my.ixdtimes(ev_dts, [date[0]], \
[date[1]], [date[2]], [0])
if len(ix) == 1:
key = ev_keys[ix]
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
elif len(ix) > 1:
# print 'there is more than one event on this day'
# print 'lets find the centroid that matches'
todays_cXs = ev_cXs[ix]
index2 = np.where(todays_cXs == cX)[0]
if len(index2) != 1:
print 'Error - centroid not matching'
index3 = ix[index2]
key = ev_keys[index3]
# print 'selecting event with matching centroid'
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
# print 'but is it the first date?'
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
edts = np.asarray(edts)
edts[:, 3] = 0
cXs = np.asarray(cXs)
cYs = np.asarray(cYs)
degs = np.asarray(degs)
mons = np.asarray(mons)
# Select the dates for 50 closest
### Loop flagged days and select those with certain angle and centroid
print 'looping flagged days to find good centroids and angles'
tmp_edts = []
if sample == 'blon' or sample == 'blon2':
tmp_cXs = []
tmp_cYs = []
tmp_degs = []
tmp_mons = []
for b in range(len(edts)):
date = edts[b]
mon = mons[b]
cX = cXs[b]
cY = cYs[b]
deg = degs[b]
# Check on the month
if mon >= f_seas[jj] or mon <= l_seas[jj]:
if sample == 'blon' or sample == 'blon2':
# Check on the latitude of centroid
if cY > s_cen[jj] and cY < n_cen[jj]:
# Check on the angle
if deg > t_ang[jj] and deg < b_ang[jj]:
tmp_edts.append(date)
tmp_cXs.append(cX)
tmp_cYs.append(cY)
tmp_degs.append(deg)
tmp_mons.append(mon)
tmp_edts = np.asarray(tmp_edts)
tmp_edts[:, 3] = 0
if sample == 'blon' or sample == 'blon2':
tmp_cXs = np.asarray(tmp_cXs)
dists = best_lon[jj] - tmp_cXs
tmp_cYs = np.asarray(tmp_cYs)
tmp_degs = np.asarray(tmp_degs)
tmp_mons = np.asarray(tmp_mons)
abs_dists = np.absolute(dists)
inds = np.argsort(abs_dists)
dists_sort = abs_dists[inds]
first50_ind = inds[0:ndays[jj]]
# Get a key for each one so we can re-order
shlist_len=len(tmp_edts)
mykeys=np.arange(0,shlist_len,1)
print mykeys
edts_50 = tmp_edts[first50_ind]
cXs_50 = tmp_cXs[first50_ind]
cYs_50 = tmp_cYs[first50_ind]
degs_50 = tmp_degs[first50_ind]
mykeys_sel=mykeys[first50_ind]
print mykeys_sel
# Reorder
keyinds=np.argsort(mykeys_sel)
smp_edts = edts_50[keyinds]
smp_cXs = cXs_50[keyinds]
smp_cYs = cYs_50[keyinds]
smp_degs = degs_50[keyinds]
mykeys_final = mykeys_sel[keyinds]
print mykeys_final
print smp_edts
# Find indices from var file
indices_m1 = []
for e in range(len(smp_edts)):
date = smp_edts[e]
ix = my.ixdtimes(dtime, [date[0]], [date[1]], [date[2]], [0])
if len(ix) >= 1:
indices_m1.append(ix)
indices_m1 = np.squeeze(np.asarray(indices_m1))
# Select these dates
olrsel = olrdata[indices_m1, :, :]
dates = dtime[indices_m1]
# Count timesteps
nsteps=len(olrsel[:,0,0])
### Count number of events
count_sel = str(nsteps)
print "Total flagged CBs selected =" + str(count_sel)
# Get lon lat grid
plon, plat = np.meshgrid(lon, lat)
# Loop 20 day intervals and plot
tally=0
nrep=24
for r in range(nrep):
print "repetition no "+str(r)
fd=tally*nplot
ld=fd+nplot
thesedays=olrsel[fd:ld,:,:]
datesel=smp_edts[fd:ld]
cXsel=smp_cXs[fd:ld]
cYsel=smp_cYs[fd:ld]
degsel=smp_degs[fd:ld]
#print datesel
# Set up figure
g, ax = plt.subplots(figsize=[12, 8])
m, f = pt.AfrBasemap(lat, lon, drawstuff=True, prj='cyl', fno=1, rsltn='l')
# Loop these 20 tsteps and make a plot
cnt = 1
for p in range(nplot):
data4plot = np.squeeze(thesedays[p, :, :])
tmp = datesel[p]
datestr = str(tmp[0]) + '.' + str(tmp[1]) + '.' + str(tmp[2])
this_cX=cXsel[p]
this_cY=cYsel[p]
this_deg=degsel[p]
# Plot subplot
plt.subplot(yplots, xplots, cnt)
clevs = np.arange(200, 280, 10)
cm = plt.cm.gray_r
cs = m.contourf(plon, plat, data4plot, clevs, cmap=cm, extend='both')
# Add centroid
#print 'Plotting centroid at lon'+str(this_cX)+' and lat '+str(this_cY)
plt.plot(this_cX,this_cY,'o',c='fuchsia',zorder=1)
# Draw contour angle
ex = np.cos(np.deg2rad(this_deg)) * 6
ey = np.sin(np.deg2rad(this_deg)) * 6
#if this_deg < 0: ey = -ey
#gpx = blb.Geopix((len(lat), len(lon)), lat, lon)
#if hasattr(m, 'drawgreatcircle'):
#cx, cy = gpx.xp2lon(this_cX), gpx.yp2lat(cY)
#ex, ey = gpx.xp2lon(ex), gpx.yp2lat(ey)
# cx, cy = this_cX,this_cY
# mcx, mcy = m(cx, cy)
# mex, mey = m(ex, ey)
# mex2, mey2 = m(-ex, -ey)
#else:
#cx, cy = gpx.xp2lon(this_cX), gpx.yp2lat(this_cY)
#ex, ey = gpx.xp2lon(ex), gpx.yp2lat(ey)
cx, cy = this_cX,this_cY
mcx, mcy, mex, mey = cx, cy, ex, ey
mex2, mey2 = -ex, -ey
plt.arrow(mcx, mcy, mex, mey, fc='cyan', ec='r',zorder=2)
#print 'Trying to plot arrow with...'
#print mcx, mcy, mex, mey
plt.arrow(mcx, mcy, mex2, mey2, fc='cyan', ec='r',zorder=3)
txt = "Tilt: %03.0f" % (this_deg)
plt.text(mcx, mcy, txt, color='c', fontsize=14., fontweight='bold')
# Redraw map
m.drawcountries()
m.drawcoastlines()
plt.title(datestr,fontsize='x-small')
cnt += 1
plt.subplots_adjust(left=0.05, right=0.9, top=0.98, bottom=0.02, wspace=0.1, hspace=0.2)
# Plot cbar
axcl = g.add_axes([0.95, 0.15, 0.02, 0.7])
cbar = plt.colorbar(cs, cax=axcl)
# Save
outname = outdir + 'looped_days.'+str(tally)+'.n' + size + '.' + dset + '.' + name + '.' + globv + \
'.png'
plt.savefig(outname, dpi=150)
plt.close()
tally+=1 | # To plot a maps for flagged CB days
# after selection based on angle and centroid
#
# plotted over a larger domain | random_line_split | |
plot_all_sampled_days.py | # To plot a maps for flagged CB days
# after selection based on angle and centroid
#
# plotted over a larger domain
# with centroid and angle displayed
import mpl_toolkits.basemap as bm
from mpl_toolkits.basemap import Basemap, cm
from netCDF4 import Dataset as NetCDFFile
import numpy as np
import matplotlib.pyplot as plt
import sys,os
cwd=os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd+'/../../MetBot')
sys.path.append(cwd+'/../../RTools')
sys.path.append(cwd+'/../')
import PlotTools as pt
import MetBot.dset_dict as dsetdict
import dsets_paper_28_4plot as dset_mp
import MetBot.dimensions_dict as dim_exdict
import MetBot.mytools as my
import MetBot.mynetcdf as mync
import MetBot.SynopticAnatomy as sy
import MetBot.MetBlobs as blb
import time as tm
import datetime
### Running options
size='20'
globv='olr'
postrmm=False
sub='SA'
from_event='first'
sample='blon'
type='cont'
if type == 'cont':
jj = 0
if sample == 'blon':
best_lon = [33,58]
ndays=[50,50]
n_cen = [-22,-22]
s_cen = [-32,-32]
t_ang = [-60,-50]
b_ang = [-25,-15]
f_seas = [11,11]
l_seas = [3,3]
# How many plots do you want?
if size=='20':
nplot=int(size)
xplots=4
yplots=5
### Get directories
bkdir=cwd+"/../../../CTdata/"
botdir=bkdir+"metbot_multi_dset/"
thisdir=bkdir+"hpaperplot/plot_all_sampled_days"
my.mkdir_p(thisdir)
### Multi dset?
dsets='spec' # "all" or "spec" to choose specific dset(s)
if dsets=='all':
|
elif dsets=='spec': # edit for the dset you want
#ndset=1
#dsetnames=['ncep']
ndset=1
dsetnames=['cmip5']
ndstr=str(ndset)
for d in range(ndset):
dset=dsetnames[d]
dcnt=str(d+1)
print 'Running on '+dset
print 'This is dset '+dcnt+' of '+ndstr+' in list'
outdir=thisdir+'/'+dset+'/'
my.mkdir_p(outdir)
### Multi model?
mods = 'spec' # "all" or "spec" to choose specific model(s)
if mods == 'all':
nmod = len(dset_mp.dset_deets[dset])
mnames = list(dset_mp.dset_deets[dset])
if mods == 'spec': # edit for the models you want
nmod = 1
mnames = ['ACCESS1-0']
#nmod=5
#mnames=['ACCESS1-0','bcc-csm1-1-m','CanESM2','GFDL-CM3','MIROC-ESM']
nmstr = str(nmod)
for m in range(nmod):
name = mnames[m]
mcnt = str(m + 1)
print 'Running on ' + name
print 'This is model ' + mcnt + ' of ' + nmstr + ' in list'
# Get info
moddct = dsetdict.dset_deets[dset][name]
vnamedict = globv + 'name'
varstr = moddct[vnamedict]
ys = moddct['yrfname']
dimdict = dim_exdict.dim_deets[globv][dset]
latname = dimdict[1]
lonname = dimdict[2]
# Open olr file
olrfile=botdir+dset+'/'+name+'.'+globv+\
'.day.mean.'+ys+'.nc'
print 'Opening '+olrfile
ncout = mync.open_multi(olrfile, globv, name, \
dataset=dset, subs=sub)
ndim = len(ncout)
if ndim == 5:
olrdata, time, lat, lon, dtime = ncout
elif ndim == 6:
olrdata, time, lat, lon, lev, dtime = ncout
olrdata = np.squeeze(olrdata)
else:
print 'Check number of dims in ncfile'
dtime[:, 3] = 0
# Select dates with TTCBs only
threshtxt = botdir + 'thresholds.fmin.all_dset.txt'
print threshtxt
with open(threshtxt) as f:
for line in f:
if dset + '\t' + name in line:
thresh = line.split()[2]
print 'thresh=' + str(thresh)
thresh = int(thresh)
thisthresh = thresh
thre_str = str(int(thisthresh))
### Open synop file
sydir=botdir+dset+'/'+name+'/'
sysuf=sydir+name+'_'
mbsfile = sysuf + thre_str + '_' + dset + "-olr-0-0.mbs"
refmbs, refmbt, refch = blb.mbopen(mbsfile)
refmbt[:,3]=0
if from_event == 'first':
syfile = sysuf + thre_str + '_' + dset + '-OLR.synop'
s = sy.SynopticEvents((), [syfile], COL=False)
ks = s.events.keys()
ks.sort()
refkey = s.mbskeys[0]
count_all = str(int(len(ks)))
print "Total CBs flagged =" + str(count_all)
ev_dts = []
ev_keys = []
ev_cXs = []
for k in ks:
e = s.events[k]
dts = s.blobs[refkey]['mbt'][e.ixflags]
for dt in range(len(dts)):
x, y = e.trkcX[dt], e.trkcY[dt]
ev_dts.append(dts[dt])
ev_keys.append(k)
ev_cXs.append(x)
ev_dts = np.asarray(ev_dts)
ev_dts[:, 3] = 0
ev_keys = np.asarray(ev_keys)
ev_cXs = np.asarray(ev_cXs)
### Get array of centroids and angles
edts = []
cXs = []
cYs = []
degs = []
mons = []
for b in range(len(refmbt)):
date = refmbt[b]
mon = int(date[1])
cX = refmbs[b, 3]
cY = refmbs[b, 4]
deg = refmbs[b, 2]
if from_event == 'all':
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
elif from_event == 'first':
# print 'Checking if the date is the first day of an event'
ix = my.ixdtimes(ev_dts, [date[0]], \
[date[1]], [date[2]], [0])
if len(ix) == 1:
key = ev_keys[ix]
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
elif len(ix) > 1:
# print 'there is more than one event on this day'
# print 'lets find the centroid that matches'
todays_cXs = ev_cXs[ix]
index2 = np.where(todays_cXs == cX)[0]
if len(index2) != 1:
print 'Error - centroid not matching'
index3 = ix[index2]
key = ev_keys[index3]
# print 'selecting event with matching centroid'
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
# print 'but is it the first date?'
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
edts = np.asarray(edts)
edts[:, 3] = 0
cXs = np.asarray(cXs)
cYs = np.asarray(cYs)
degs = np.asarray(degs)
mons = np.asarray(mons)
# Select the dates for 50 closest
### Loop flagged days and select those with certain angle and centroid
print 'looping flagged days to find good centroids and angles'
tmp_edts = []
if sample == 'blon' or sample == 'blon2':
tmp_cXs = []
tmp_cYs = []
tmp_degs = []
tmp_mons = []
for b in range(len(edts)):
date = edts[b]
mon = mons[b]
cX = cXs[b]
cY = cYs[b]
deg = degs[b]
# Check on the month
if mon >= f_seas[jj] or mon <= l_seas[jj]:
if sample == 'blon' or sample == 'blon2':
# Check on the latitude of centroid
if cY > s_cen[jj] and cY < n_cen[jj]:
# Check on the angle
if deg > t_ang[jj] and deg < b_ang[jj]:
tmp_edts.append(date)
tmp_cXs.append(cX)
tmp_cYs.append(cY)
tmp_degs.append(deg)
tmp_mons.append(mon)
tmp_edts = np.asarray(tmp_edts)
tmp_edts[:, 3] = 0
if sample == 'blon' or sample == 'blon2':
tmp_cXs = np.asarray(tmp_cXs)
dists = best_lon[jj] - tmp_cXs
tmp_cYs = np.asarray(tmp_cYs)
tmp_degs = np.asarray(tmp_degs)
tmp_mons = np.asarray(tmp_mons)
abs_dists = np.absolute(dists)
inds = np.argsort(abs_dists)
dists_sort = abs_dists[inds]
first50_ind = inds[0:ndays[jj]]
# Get a key for each one so we can re-order
shlist_len=len(tmp_edts)
mykeys=np.arange(0,shlist_len,1)
print mykeys
edts_50 = tmp_edts[first50_ind]
cXs_50 = tmp_cXs[first50_ind]
cYs_50 = tmp_cYs[first50_ind]
degs_50 = tmp_degs[first50_ind]
mykeys_sel=mykeys[first50_ind]
print mykeys_sel
# Reorder
keyinds=np.argsort(mykeys_sel)
smp_edts = edts_50[keyinds]
smp_cXs = cXs_50[keyinds]
smp_cYs = cYs_50[keyinds]
smp_degs = degs_50[keyinds]
mykeys_final = mykeys_sel[keyinds]
print mykeys_final
print smp_edts
# Find indices from var file
indices_m1 = []
for e in range(len(smp_edts)):
date = smp_edts[e]
ix = my.ixdtimes(dtime, [date[0]], [date[1]], [date[2]], [0])
if len(ix) >= 1:
indices_m1.append(ix)
indices_m1 = np.squeeze(np.asarray(indices_m1))
# Select these dates
olrsel = olrdata[indices_m1, :, :]
dates = dtime[indices_m1]
# Count timesteps
nsteps=len(olrsel[:,0,0])
### Count number of events
count_sel = str(nsteps)
print "Total flagged CBs selected =" + str(count_sel)
# Get lon lat grid
plon, plat = np.meshgrid(lon, lat)
# Loop 20 day intervals and plot
tally=0
nrep=24
for r in range(nrep):
print "repetition no "+str(r)
fd=tally*nplot
ld=fd+nplot
thesedays=olrsel[fd:ld,:,:]
datesel=smp_edts[fd:ld]
cXsel=smp_cXs[fd:ld]
cYsel=smp_cYs[fd:ld]
degsel=smp_degs[fd:ld]
#print datesel
# Set up figure
g, ax = plt.subplots(figsize=[12, 8])
m, f = pt.AfrBasemap(lat, lon, drawstuff=True, prj='cyl', fno=1, rsltn='l')
# Loop these 20 tsteps and make a plot
cnt = 1
for p in range(nplot):
data4plot = np.squeeze(thesedays[p, :, :])
tmp = datesel[p]
datestr = str(tmp[0]) + '.' + str(tmp[1]) + '.' + str(tmp[2])
this_cX=cXsel[p]
this_cY=cYsel[p]
this_deg=degsel[p]
# Plot subplot
plt.subplot(yplots, xplots, cnt)
clevs = np.arange(200, 280, 10)
cm = plt.cm.gray_r
cs = m.contourf(plon, plat, data4plot, clevs, cmap=cm, extend='both')
# Add centroid
#print 'Plotting centroid at lon'+str(this_cX)+' and lat '+str(this_cY)
plt.plot(this_cX,this_cY,'o',c='fuchsia',zorder=1)
# Draw contour angle
ex = np.cos(np.deg2rad(this_deg)) * 6
ey = np.sin(np.deg2rad(this_deg)) * 6
#if this_deg < 0: ey = -ey
#gpx = blb.Geopix((len(lat), len(lon)), lat, lon)
#if hasattr(m, 'drawgreatcircle'):
#cx, cy = gpx.xp2lon(this_cX), gpx.yp2lat(cY)
#ex, ey = gpx.xp2lon(ex), gpx.yp2lat(ey)
# cx, cy = this_cX,this_cY
# mcx, mcy = m(cx, cy)
# mex, mey = m(ex, ey)
# mex2, mey2 = m(-ex, -ey)
#else:
#cx, cy = gpx.xp2lon(this_cX), gpx.yp2lat(this_cY)
#ex, ey = gpx.xp2lon(ex), gpx.yp2lat(ey)
cx, cy = this_cX,this_cY
mcx, mcy, mex, mey = cx, cy, ex, ey
mex2, mey2 = -ex, -ey
plt.arrow(mcx, mcy, mex, mey, fc='cyan', ec='r',zorder=2)
#print 'Trying to plot arrow with...'
#print mcx, mcy, mex, mey
plt.arrow(mcx, mcy, mex2, mey2, fc='cyan', ec='r',zorder=3)
txt = "Tilt: %03.0f" % (this_deg)
plt.text(mcx, mcy, txt, color='c', fontsize=14., fontweight='bold')
# Redraw map
m.drawcountries()
m.drawcoastlines()
plt.title(datestr,fontsize='x-small')
cnt += 1
plt.subplots_adjust(left=0.05, right=0.9, top=0.98, bottom=0.02, wspace=0.1, hspace=0.2)
# Plot cbar
axcl = g.add_axes([0.95, 0.15, 0.02, 0.7])
cbar = plt.colorbar(cs, cax=axcl)
# Save
outname = outdir + 'looped_days.'+str(tally)+'.n' + size + '.' + dset + '.' + name + '.' + globv + \
'.png'
plt.savefig(outname, dpi=150)
plt.close()
tally+=1 | ndset=len(dset_mp.dset_deets)
dsetnames=list(dset_mp.dset_deets) | conditional_block |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic. i.e. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type, that can represent all possible symbol values. It implements all traits to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et. al use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to consider what kind of symbols are considered equal.
//! The `Eq`/`Hash` symmetry need to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and consideres namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> { ... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T: 'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T: 'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self { | f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// it's more than two words because it needs to encode the A/B enum value.
// on 64-bit arch it should be 3 words, because it contains an `u32` too,
// and that should be encoded within the same machine word as the enum discriminant..
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn test_equality() {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
}
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
} | Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!( | random_line_split |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic. i.e. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type, that can represent all possible symbol values. It implements all traits to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et. al use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to consider what kind of symbols are considered equal.
//! The `Eq`/`Hash` symmetry need to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and consideres namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> { ... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T: 'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T: 'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!(
f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// it's more than two words because it needs to encode the A/B enum value.
// on 64-bit arch it should be 3 words, because it contains an `u32` too,
// and that should be encoded within the same machine word as the enum discriminant..
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn test_equality() |
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
}
| {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
} | identifier_body |
lib.rs | //!
//! Dynamic, plugin-based [Symbol](https://en.wikipedia.org/wiki/Symbol_(programming)) abstraction.
//!
//! A [Symbol] can be used as an _identifier_ in place of the more primitive workhorse [String].
//! There could be multiple reasons to do so:
//!
//! 1. Mixing of different domains in the same runtime code
//! 2. Handling of naming collisions in multiple namespaces
//! 3. Avoiding memory allocations for statically known namespaces
//! 4. Mix of static and dynamic allocation
//! 5. Associating metadata to the symbols themselves
//!
//! The main use case for symbols is as map keys for in-memory key/value stores.
//!
//! Note that there are probably more reasons _not_ to use symbols than to use them! In most cases, something like
//! `enum` or [String] will do just fine. But sometimes applications process a lot of semi-schematic external input,
//! and you just want Rust to work like any old dynamic programming language again.
//!
//! # Example use cases
//! * Namespaced XML/HTML attributes (in HTML, some are static and some are dynamic. i.e. `data-` attributes)
//! * Key/value stores for "anything"
//! * Some way to abstract away string interners? (this is untested)
//!
//! A [Symbol] is just one plain, non-generic type, that can represent all possible symbol values. It implements all traits to make it
//! usable as a key in maps.
//!
//! # What this crate does not do
//! * Serialization and deserialization of symbols. [Symbol] should not implement `serde` traits, ser/de should instead be handled by each namespace.
//! * Provide any namespaces.
//!
//! # Static symbols
//! Static symbols originate from a namespace where all possible values are statically known at compile time.
//! One instance of a static symbol requires no memory allocation.
//!
//! Creating a static namespace:
//!
//! ```
//! use dyn_symbol::*;
//!
//! struct MyStaticNS {
//! symbols: &'static [(&'static str, &'static str)],
//! }
//!
//! const MY_STATIC_NS: MyStaticNS = MyStaticNS {
//! symbols: &[
//! ("foo", "the first symbol!"),
//! ("bar", "the second symbol!")
//! ]
//! };
//!
//! impl dyn_symbol::namespace::Static for MyStaticNS {
//! fn namespace_name(&self) -> &str {
//! "my"
//! }
//!
//! fn symbol_name(&self, id: u32) -> &str {
//! self.symbols[id as usize].0
//! }
//! }
//!
//! // Define (and export) some symbol constants
//! pub const FOO: Symbol = Symbol::Static(&MY_STATIC_NS, 0);
//! pub const BAR: Symbol = Symbol::Static(&MY_STATIC_NS, 1);
//!
//! assert_eq!(FOO, FOO);
//! assert_eq!(FOO.clone(), FOO.clone());
//! assert_ne!(FOO, BAR);
//! assert_eq!(format!("{:?}", FOO), "my::foo");
//!
//! // We can find the originating namespace later:
//! assert!(FOO.downcast_static::<MyStaticNS>().is_some());
//!
//! // To implement special metadata-extraction (or similar functionality) for a namespace:
//! fn get_symbol_description(symbol: &Symbol) -> Result<&'static str, &'static str> {
//! if let Some((namespace, id)) = symbol.downcast_static::<MyStaticNS>() {
//! Ok(namespace.symbols[id as usize].1)
//! } else {
//! Err("not from this namespace :(")
//! }
//! }
//!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et. al use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to consider what kind of symbols are considered equal.
//! The `Eq`/`Hash` symmetry need to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and consideres namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> { ... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T: 'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T: 'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!(
f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// it's more than two words because it needs to encode the A/B enum value.
// on 64-bit arch it should be 3 words, because it contains an `u32` too,
// and that should be encoded within the same machine word as the enum discriminant..
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn | () {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
}
#[test]
fn test_inequality() {
let test_state = TestState::new();
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_B_0);
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym0("bar"));
test_state.assert_full_ne(&dynamic::sym0("foo"), &dynamic::sym1("foo"));
}
#[test]
fn test_ord() {
assert_ne!(STATIC_A_0.cmp(&STATIC_A_1), Ordering::Equal);
assert_ne!(STATIC_A_0.cmp(&STATIC_B_0), Ordering::Equal);
assert_ne!(STATIC_A_1.cmp(&STATIC_B_0), Ordering::Equal);
}
}
| test_equality | identifier_name |
xapian_backend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import copy
import xapian
import cPickle as pickle
import simplejson as json
import pymongo
import scws
import datetime
import calendar
from itertools import product
from argparse import ArgumentParser
SCWS_ENCODING = 'utf-8'
SCWS_RULES = '/usr/local/scws/etc/rules.utf8.ini'
CHS_DICT_PATH = '/usr/local/scws/etc/dict.utf8.xdb'
CHT_DICT_PATH = '/usr/local/scws/etc/dict_cht.utf8.xdb'
CUSTOM_DICT_PATH = '../dict/userdic.txt'
IGNORE_PUNCTUATION = 1
EXTRA_STOPWORD_PATH = '../dict/stopword.dic'
EXTRA_EMOTIONWORD_PATH = '../dict/emotionlist.txt'
PROCESS_IDX_SIZE = 100000
SCHEMA_VERSION = 1
DOCUMENT_ID_TERM_PREFIX = 'M'
DOCUMENT_CUSTOM_TERM_PREFIX = 'X'
OPERATIONINT2STR = {
'0': 'AND',
'1': 'AND_NOT',
'2': 'OR',
'3': 'XOR',
'4': 'NOT',
}
class XapianIndex(object):
def __init__(self, dbpath, schema_version):
self.path = dbpath
self.schema = getattr(Schema, 'v%s' % schema_version, None)
self.databases = {}
self.load_scws()
self.load_mongod()
self.load_extra_dic()
def document_count(self, folder):
try:
return _database(folder).get_doccount()
except InvalidIndexError:
return 0
def generate(self, start_time=None):
folders_with_date = []
if not debug and start_time:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
elif debug:
start_time = datetime.datetime(2009, 8, 1)
step_time = datetime.timedelta(days=50)
while start_time < datetime.datetime.today():
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
start_time += step_time
self.folders_with_date = folders_with_date
def load_extra_dic(self):
self.emotion_words = [line.strip('\r\n') for line in file(EXTRA_EMOTIONWORD_PATH)]
def load_scws(self):
s = scws.Scws()
s.set_charset(SCWS_ENCODING)
s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)
# 把停用词全部拆成单字,再过滤掉单字,以达到去除停用词的目的
s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)
# 即基于表情表对表情进行分词,必要的时候在返回结果处或后剔除
s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)
s.set_rules(SCWS_RULES)
s.set_ignore(IGNORE_PUNCTUATION)
self.s = s
def load_mongod(self):
connection = pymongo.Connection()
db = connection.admin
db.authenticate('root', 'root')
db = connection.weibo
self.db = db
def get_database(self, folder):
if folder not in self.databases:
self.databases[folder] = _database(folder, writable=True)
retur | start_time = self.folders_with_date[0][0]
end_time = start_time + datetime.timedelta(days=50)
weibos = self.db.statuses.find({
self.schema['posted_at_key']: {
'$gte': calendar.timegm(start_time.timetuple()),
'$lt': calendar.timegm(end_time.timetuple())
}
}, timeout=False)
print 'prod mode: loaded weibos from mongod'
elif debug:
with open("../test/sample_tweets.js") as f:
weibos = json.loads(f.readline())
print 'debug mode: loaded weibos from file'
count = 0
try:
for weibo in weibos:
count += 1
posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
if not debug and start_time:
folder = self.folders_with_date[0][1]
elif debug:
for i in xrange(len(self.folders_with_date) - 1):
if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i][1]
break
else:
if posted_at >= self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i + 1][1]
self.update(folder, weibo)
if count % PROCESS_IDX_SIZE == 0:
print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
except Exception:
raise
finally:
for database in self.databases.itervalues():
database.close()
for _, folder in self.folders_with_date:
print 'index size', folder, self.document_count(folder)
def update(self, folder, weibo):
document = xapian.Document()
document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
for field in self.schema['idx_fields']:
self.index_field(field, document, weibo, SCHEMA_VERSION)
document.set_data(pickle.dumps(
weibo, pickle.HIGHEST_PROTOCOL
))
document.add_term(document_id)
self.get_database(folder).replace_document(document_id, document)
def index_field(self, field, document, weibo, schema_version):
prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
if schema_version == 1:
if field['field_name'] in ['uid', 'name']:
term = _marshal_term(weibo[field['field_name']])
document.add_term(prefix + term)
elif field['field_name'] == 'ts':
document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
elif field['field_name'] == 'text':
tokens = [token[0] for token
in self.s.participle(weibo[field['field_name']].encode('utf-8'))
if len(token[0]) > 1]
for token in tokens:
if len(token) <= 10:
document.add_term(prefix + token)
document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
def create(dbpath):
return xapian.Database(dbpath)
def merge(db1, db2):
db1.add_database(db2)
return db1
self.database = reduce(merge,
map(create,
[path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
self.schema = getattr(Schema, 'v%s' % schema_version, None)
def parse_query(self, query_dict):
"""
Given a `query_dict`, will attempt to return a xapian.Query
Required arguments:
``query_dict`` -- A query dict similar to MongoDB style to parse
Returns a xapian.Query
Operator Reference:
Comparison:
equal, key = value, { key:value }
$lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
Logical:
$and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
... , { <expressionN> }] }
$or, perform logical OR operation in expressions like the $and operation
$xor, perform logical XOR operation in expressions like the $and operation
$not, perform logical NOT operation in experssions, which get the conjunction of both negative
experssions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
PS: if not any operation is specified, the logical AND operation is the default operation
(An implicit AND operation is performed when specifying a comma separated list of expressions).
See more query examples in test files.
"""
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
def build_query_tree(self, query_dict):
"""将字典转成语法树"""
ops = ['$not']
bi_ops = ['$or', '$and', '$xor']
def op(a, b, operation):
if operation == '$and':
return a & b
elif operation == '$or':
return a | b
elif operation == '$xor':
return a ^ b
else:
raise OperationError('Operation %s cannot be processed.' % operation)
def grammar_tree(query_dict):
total_query = Q()
for k in query_dict.keys():
if k in bi_ops:
#deal with expression without operator
bi_query = reduce(lambda a, b: op(a, b, k),
map(lambda expr: Q(**expr),
filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
#deal with nested expression
nested_query = reduce(lambda a, b: op(a, b, k),
map(lambda query_dict: grammar_tree(query_dict),
filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
if nested_query:
total_query &= op(bi_query, nested_query, k)
else:
total_query &= bi_query
elif k in ops:
if k == '$not':
not_dict = {}
#nested_query_dict = {}
for not_k in query_dict[k]:
if not_k not in ops + bi_ops:
not_dict[not_k] = query_dict[k][not_k]
else:
pass
#nested query in a $not statement is not implemented
#nested_query_dict.update({not_k: query_dict[k][not_k]})
not_query = notQ(**not_dict)
total_query &= not_query
else:
total_query &= Q(**{k: query_dict[k]})
return total_query
total_query = grammar_tree(query_dict)
return total_query
def search(self, query=None, sort_by=None, start_offset=0,
max_offset=1000, fields=None, **kwargs):
query = self.parse_query(query)
if xapian.Query.empty(query):
return {
'results': [],
'hits': 0,
}
database = self.database
enquire = xapian.Enquire(database)
enquire.set_query(query)
if sort_by:
sorter = xapian.MultiValueSorter()
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
sorter.add(self._value_column(sort_field), reverse)
enquire.set_sort_by_key_then_relevance(sorter, True)
results = []
if not max_offset:
max_offset = database.get_doccount() - start_offset
matches = self._get_enquire_mset(database, enquire, start_offset, max_offset)
for match in matches:
weibo = pickle.loads(self._get_document_data(database, match.document))
item = None
if fields is not None: # 如果fields为[], 这情况下,不返回任何一项
item = {}
for field in fields:
item[field] = weibo[field]
else:
item = weibo
results.append(item)
return {
'results': results,
'hits': self._get_hit_count(database, enquire)
}
def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
"""
A safer version of Xapian.enquire.get_mset
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`enquire` -- An instance of an Xapian.enquire object
`start_offset` -- The start offset to pass to `enquire.get_mset`
`max_offset` -- The max offset (maxitems to acquire) to pass to `enquire.get_mset`
"""
try:
return enquire.get_mset(start_offset, max_offset)
except xapian.DatabaseModifiedError:
database.reopen()
return enquire.get_mset(start_offset, max_offset)
def _get_document_data(self, database, document):
"""
A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object
"""
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data()
def _value_column(self, field):
"""
Private method that returns the column value slot in the database
for a given field.
Required arguemnts:
`field` -- The field to lookup
Returns an integer with the column location (0 indexed).
"""
for field_dict in self.schema['idx_fields']:
if field_dict['field_name'] == field:
return field_dict['column']
return 0
def _get_hit_count(self, database, enquire):
"""
Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance
"""
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size()
def _marshal_value(value):
"""
Private utility method that converts Python values to a string for Xapian values.
"""
if isinstance(value, (int, long)):
value = xapian.sortable_serialise(value)
return value
def _marshal_term(term):
"""
Private utility method that converts Python terms to a string for Xapian terms.
"""
if isinstance(term, int):
term = str(term)
return term
def _database(folder, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if writable:
if debug:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)
else:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(folder)
except xapian.DatabaseOpeningError:
raise InvalidIndexError(u'Unable to open index at %s' % folder)
return database
class InvalidIndexError(Exception):
"""Raised when an index can not be opened."""
pass
class InvalidQueryError(Exception):
"""Raised when a query is illegal."""
pass
class OperationError(Exception):
"""Raised when queries cannot be operated."""
pass
class QNodeVisitor(object):
"""
Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""
Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""
Called by (New)Q objects.
"""
return query
def visit_not_query(self, query):
"""
Called by (New)NOT Q objects.
"""
return query
class SimplificationVisitor(QNodeVisitor):
"""
Simplifies query trees by combinging unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
"""
The simplification only applies to 'simple' queries
如果最外层的操作符是and,然后里面的每个元素都是一个独自的Q且不是not Q
将所有的Q的query抽出来,到一个query里面来
"""
if all(isinstance(node, Q) and not isinstance(node, notQ)
for node in combination.children):
queries = [node.query for node in combination.children]
return Q(**self._query_conjunction(queries))
return combination
def _query_conjunction(self, queries):
"""
Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops & query_ops
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
"""
Transforms the query tree in to a form that may be more effective used with Xapian.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# Move the ORs up the tree to one 'master' $or.
# Firstly, we must find all the necessary parts (part
# of an AND combination or just standard Q object), and store them
# separately from the OR parts.
or_groups = []
and_parts = []
for node in combination.children:
if isinstance(node, QCombination):
if node.operation == node.OR:
# Any of the children in an $or component may cause
# the query to succeed
or_groups.append(node.children)
elif node.operation == node.AND:
and_parts.append(node)
elif isinstance(node, Q):
and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses in to one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
class QueryCompilerVisitor(QNodeVisitor):
"""
Compiles the nodes in a query tree to a Xapian-compatible query.
"""
def __init__(self, schema, database):
self.schema = schema
self.database = database
def visit_combination(self, combination):
if combination.operation == combination.OR:
return xapian.Query(xapian.Query.OP_OR, combination.children)
elif combination.operation == combination.AND:
return xapian.Query(xapian.Query.OP_AND, combination.children)
elif combination.operation == combination.AND_NOT:
return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
elif combination.operation == combination.XOR:
return xapian.Query(xapian.Query.OP_XOR, combination.children)
return combination
def visit_not_query(self, query):
new_query = self.visit_query(query, n=True)
#NOT set is the intersection of universal set AND NOT set
new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
return new_query
def visit_query(self, query, n=False):
query_dict = query.query
qp = xapian.QueryParser()
qp.set_database(self.database)
field_prefix = {}
field_type = {}
field_col = {}
for field_dict in self.schema['idx_fields']:
fname = field_dict['field_name']
field_col[fname] = field_dict['column']
field_type[fname] = field_dict['type']
field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
pre_query = None
new_query = None
for field in query_dict:
if field in field_prefix:
prefix = field_prefix[field]
col = field_col[field]
value = query_dict[field]
if isinstance(value, dict):
ftype = field_type[field]
if ftype == 'int' or ftype == 'long':
begin = value.get('$gt', 0)
end = value.get('$lt', sys.maxint)
qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
value = ['%s%s' % (prefix, v) for v in value]
#De Morgan's laws, if we want the intersection of negation sets,
#Firstly, we obtain the disjunction of this sets, then get negation of them
# (AND_NOT [U, (OR, [a, b, c])])
# NOT (a OR B OR C)
# NOT a AND not b AND not C
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, value)
else:
new_query = xapian.Query(xapian.Query.OP_OR, value)
else:
new_query = xapian.Query('%s%s' % (prefix, value))
if pre_query:
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
else:
# and_not , [U, a or b])
# not a and not b
new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
pre_query = new_query
return new_query
class QNode(object):
"""
Base class for nodes in query trees.
"""
AND = 0
AND_NOT = 1
OR = 2
XOR = 3
NOT = 4
def to_query(self, schema, database):
'''
The query optimization is a bit harder, so we just leave the optimization of query
to user's own judgement and come back to it in the future.
'''
#query = self.accept(SimplificationVisitor())
#query = query.accept(QueryTreeTransformerVisitor())
query = self.accept(QueryCompilerVisitor(schema, database))
return query
def accept(self, visitor):
"""在to_query里被调用,不同子类有不同实现"""
raise NotImplementedError
def _combine(self, other, operation):
"""
Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty'):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
class QCombination(QNode):
"""
Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not self.children
def __repr__(self):
return '%s: (%s, [%s])' % \
(type(self), OPERATIONINT2STR[str(self.operation)], ', '.join([str(x) for x in self.children]))
class Q(QNode):
"""
A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not self.query
def __repr__(self):
return '%s: %s' % (type(self), self.query)
class notQ(Q):
"""
A query object based on simple query object, used in a query tree to
build up NOT query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_not_query(self)
class Schema:
v1 = {
'obj_id': '_id',
'posted_at_key': 'ts',
'idx_fields': [
{'field_name': 'uid', 'column': 0, 'type': 'long'},
{'field_name': 'name', 'column': 1, 'type': 'text'},
{'field_name': 'text', 'column': 2, 'type': 'text'},
{'field_name': 'ts', 'column': 3, 'type': 'long'}
],
}
if __name__ == "__main__":
"""
cd to test/ folder
then run 'py (-m memory_profiler) ../xapian_weibo/xapian_backend.py -d hehe'
http://pypi.python.org/pypi/memory_profiler
"""
parser = ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', help='DEBUG')
parser.add_argument('-p', '--print_folders', action='store_true', help='PRINT FOLDER THEN EXIT')
parser.add_argument('-s', '--start_time', nargs=1, help='DATETIME')
parser.add_argument('dbpath', help='PATH_TO_DATABASE')
args = parser.parse_args(sys.argv[1:])
debug = args.debug
dbpath = args.dbpath
if args.print_folders:
debug = True
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate()
for _, folder in xapian_indexer.folders_with_date:
print folder
sys.exit(0)
start_time = args.start_time[0] if args.start_time else None
if debug:
if start_time:
print 'debug mode(warning): start_time will not be used'
PROCESS_IDX_SIZE = 10000
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate(start_time)
xapian_indexer.load_and_index_weibos(start_time)
| n self.databases[folder]
#@profile
def load_and_index_weibos(self, start_time=None):
if not debug and start_time:
| identifier_body |
xapian_backend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import copy
import xapian
import cPickle as pickle
import simplejson as json
import pymongo
import scws
import datetime
import calendar
from itertools import product
from argparse import ArgumentParser
SCWS_ENCODING = 'utf-8'
SCWS_RULES = '/usr/local/scws/etc/rules.utf8.ini'
CHS_DICT_PATH = '/usr/local/scws/etc/dict.utf8.xdb'
CHT_DICT_PATH = '/usr/local/scws/etc/dict_cht.utf8.xdb'
CUSTOM_DICT_PATH = '../dict/userdic.txt'
IGNORE_PUNCTUATION = 1
EXTRA_STOPWORD_PATH = '../dict/stopword.dic'
EXTRA_EMOTIONWORD_PATH = '../dict/emotionlist.txt'
PROCESS_IDX_SIZE = 100000
SCHEMA_VERSION = 1
DOCUMENT_ID_TERM_PREFIX = 'M'
DOCUMENT_CUSTOM_TERM_PREFIX = 'X'
OPERATIONINT2STR = {
'0': 'AND',
'1': 'AND_NOT',
'2': 'OR',
'3': 'XOR',
'4': 'NOT',
}
class XapianIndex(object):
def __init__(self, dbpath, schema_version):
self.path = dbpath
self.schema = getattr(Schema, 'v%s' % schema_version, None)
self.databases = {}
self.load_scws()
self.load_mongod()
self.load_extra_dic()
def document_count(self, folder):
try:
return _database(folder).get_doccount()
except InvalidIndexError:
return 0
def generate(self, start_time=None):
folders_with_date = []
if not debug and start_time:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
elif debug:
start_time = datetime.datetime(2009, 8, 1)
step_time = datetime.timedelta(days=50)
while start_time < datetime.datetime.today():
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
start_time += step_time
self.folders_with_date = folders_with_date
def load_extra_dic(self):
self.emotion_words = [line.strip('\r\n') for line in file(EXTRA_EMOTIONWORD_PATH)]
def load_scws(self):
s = scws.Scws()
s.set_charset(SCWS_ENCODING)
s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)
# 把停用词全部拆成单字,再过滤掉单字,以达到去除停用词的目的
s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)
# 即基于表情表对表情进行分词,必要的时候在返回结果处或后剔除
s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)
s.set_rules(SCWS_RULES)
s.set_ignore(IGNORE_PUNCTUATION)
self.s = s
def load_mongod(self):
connection = pymongo.Connection()
db = connection.admin
db.authenticate('root', 'root')
db = connection.weibo
self.db = db
def get_database(self, folder):
if folder not in self.databases:
self.databases[folder] = _database(folder, writable=True)
return self.databases[folder]
#@profile
def load_and_index_weibos(self, start_time=None):
if not debug and start_time:
start_time = self.folders_with_date[0][0]
end_time = start_time + datetime.timedelta(days=50)
weibos = self.db.statuses.find({
self.schema['posted_at_key']: {
'$gte': calendar.timegm(start_time.timetuple()),
'$lt': calendar.timegm(end_time.timetuple())
}
}, timeout=False)
print 'prod mode: loaded weibos from mongod'
elif debug:
with open("../test/sample_tweets.js") as f:
weibos = json.loads(f.readline())
print 'debug mode: loaded weibos from file'
count = 0
try:
for weibo in weibos:
count += 1
posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
if not debug and start_time:
folder = self.folders_with_date[0][1]
elif debug:
for i in xrange(len(self.folders_with_date) - 1):
if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i][1]
break
else:
if posted_at >= self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i + 1][1]
self.update(folder, weibo)
if count % PROCESS_IDX_SIZE == 0:
print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
except Exception:
raise
finally:
for database in self.databases.itervalues():
database.close()
for _, folder in self.folders_with_date:
print 'index size', folder, self.document_count(folder)
def update(self, folder, weibo):
document = xapian.Document()
document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
for field in self.schema['idx_fields']:
self.index_field(field, document, weibo, SCHEMA_VERSION)
document.set_data(pickle.dumps(
weibo, pickle.HIGHEST_PROTOCOL
))
document.add_term(document_id)
self.get_database(folder).replace_document(document_id, document)
def index_field(self, field, document, weibo, schema_version):
prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
if schema_version == 1:
if field['field_name'] in ['uid', 'name']:
term = _marshal_term(weibo[field['field_name']])
document.add_term(prefix + term)
elif field['field_name'] == 'ts':
document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
elif field['field_name'] == 'text':
tokens = [token[0] for token
in self.s.participle(weibo[field['field_name']].encode('utf-8'))
if len(token[0]) > 1]
for token in tokens:
if len(token) <= 10:
document.add_term(prefix + token)
document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
def create(dbpath):
return xapian.Database(dbpath)
def merge(db1, db2):
db1.add_database(db2)
return db1
self.database = reduce(merge,
map(create,
[path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
self.schema = getattr(Schema, 'v%s' % schema_version, None)
def parse_query(self, query_dict):
"""
Given a `query_dict`, will attempt to return a xapian.Query
Required arguments:
``query_dict`` -- A query dict similar to MongoDB style to parse
Returns a xapian.Query
Operator Reference:
Comparison:
equal, key = value, { key:value }
$lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
Logical:
$and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
... , { <expressionN> }] }
$or, perform logical OR operation in expressions like the $and operation
$xor, perform logical XOR operation in expressions like the $and operation
$not, perform logical NOT operation in experssions, which get the conjunction of both negative
experssions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
PS: if not any operation is specified, the logical AND operation is the default operation
(An implicit AND operation is performed when specifying a comma separated list of expressions).
See more query examples in test files.
"""
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
def build_query_tree(self, query_dict):
"""将字典转成语法树"""
ops = ['$not']
bi_ops = ['$or', '$and', '$xor']
def op(a, b, operation):
if operation == '$and':
return a & b
elif operation == '$or':
return a | b
elif operation == '$xor':
return a ^ b
else:
raise OperationError('Operation %s cannot be processed.' % operation)
def grammar_tree(query_dict):
total_query = Q()
for k in query_dict.keys():
if k in bi_ops:
#deal with expression without operator
bi_query = reduce(lambda a, b: op(a, b, k),
map(lambda expr: Q(**expr),
filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
#deal with nested expression
nested_query = reduce(lambda a, b: op(a, b, k),
map(lambda query_dict: grammar_tree(query_dict),
filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
if nested_query:
total_query &= op(bi_query, nested_query, k)
else:
total_query &= bi_query
elif k in ops:
if k == '$not':
not_dict = {}
#nested_query_dict = {}
for not_k in query_dict[k]:
if not_k not in ops + bi_ops:
not_dict[not_k] = query_dict[k][not_k]
else:
pass
#nested query in a $not statement is not implemented
#nested_query_dict.update({not_k: query_dict[k][not_k]})
not_query = notQ(**not_dict)
total_query &= not_query
else:
total_query &= Q(**{k: query_dict[k]})
return total_query
total_query = grammar_tree(query_dict)
return total_query
def search(self, query=None, sort_by=None, start_offset=0,
max_offset=1000, fields=None, **kwargs):
query = self.parse_query(query)
if xapian.Query.empty(query):
return {
'results': [],
'hits': 0,
}
database = self.database
enquire = xapian.Enquire(database)
enquire.set_query(query)
if sort_by:
sorter = xapian.MultiValueSorter()
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
sorter.add(self._value_column(sort_field), reverse)
enquire.set_sort_by_key_then_relevance(sorter, True)
results = []
if not max_offset:
max_offset = database.get_doccount() - start_offset
matches = self._get_enquire_mset(database, enquire, start_offset, max_offset)
for match in matches:
weibo = pickle.loads(self._get_document_data(database, match.document))
item = None
if fields is not None: # 如果fields为[], 这情况下,不返回任何一项
item = {}
for field in fields:
item[field] = weibo[field]
else:
item = weibo
results.append(item)
return {
'results': results,
'hits': self._get_hit_count(database, enquire)
}
def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
"""
A safer version of Xapian.enquire.get_mset
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`enquire` -- An instance of an Xapian.enquire object
`start_offset` -- The start offset to pass to `enquire.get_mset`
`max_offset` -- The max offset (maxitems to acquire) to pass to `enquire.get_mset`
"""
try:
return enquire.get_mset(start_offset, max_offset)
except xapian.DatabaseModifiedError:
database.reopen()
return enquire.get_mset(start_offset, max_offset)
def _get_document_data(self, database, document):
"""
A safer version of Xapian.document.get_data
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
`database` -- The database to be read
`document` -- An instance of an Xapian.document object
"""
try:
return document.get_data()
except xapian.DatabaseModifiedError:
database.reopen()
return document.get_data()
def _value_column(self, field):
"""
Private method that returns the column value slot in the database
for a given field.
Required arguemnts:
`field` -- The field to lookup
Returns an integer with the column location (0 indexed).
"""
for field_dict in self.schema['idx_fields']:
if field_dict['field_name'] == field:
return field_dict['column']
return 0
def _get_hit_count(self, database, enquire):
"""
Given a database and enquire instance, returns the estimated number
of matches.
Required arguments:
`database` -- The database to be queried
`enquire` -- The enquire instance
"""
return self._get_enquire_mset(
database, enquire, 0, database.get_doccount()
).size()
def _marshal_value(value):
"""
Private utility method that converts Python values to a string for Xapian values.
"""
if isinstance(value, (int, long)):
value = xapian.sortable_serialise(value)
return value
def _marshal_term(term):
"""
Private utility method that converts Python terms to a string for Xapian terms.
"""
if isinstance(term, int):
term = str(term)
return term
def _database(folder, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if writable:
if debug:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)
else:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(folder)
except xapian.DatabaseOpeningError:
raise InvalidIndexError(u'Unable to open index at %s' % folder)
return database
class InvalidIndexError(Exception):
"""Raised when an index can not be opened."""
pass
class InvalidQueryError(Exception):
"""Raised when a query is illegal."""
pass
class OperationError(Exception):
"""Raised when queries cannot be operated."""
pass
class QNodeVisitor(object):
"""
Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""
Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""
Called by (New)Q objects.
"""
return query
def visit_not_query(self, query):
"""
Called by (New)NOT Q objects.
"""
return query
class SimplificationVisitor(QNodeVisitor):
"""
Simplifies query trees by combinging unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
"""
The simplification only applies to 'simple' queries
如果最外层的操作符是and,然后里面的每个元素都是一个独自的Q且不是not Q
将所有的Q的query抽出来,到一个query里面来
"""
if all(isinstance(node, Q) and not isinstance(node, notQ)
for node in combination.children):
queries = [node.query for node in combination.children]
return Q(**self._query_conjunction(queries))
return combination
def _query_conjunction(self, queries):
"""
Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops & query_ops
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
"""
Transforms the query tree in to a form that may be more effective used with Xapian.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# Move the ORs up the tree to one 'master' $or.
# Firstly, we must find all the necessary parts (part
# of an AND combination or just standard Q object), and store them
# separately from the OR parts.
or_groups = []
and_parts = []
for node in combination.children:
if isinstance(node, QCombination):
if node.operation == node.OR:
# Any of the children in an $or component may cause
# the query to succeed
or_groups.append(node.children)
elif node.operation == node.AND:
and_parts.append(node)
elif isinstance(node, Q):
and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses in to one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
class QueryCompilerVisitor(QNodeVisitor):
"""
Compiles the nodes in a query tree to a Xapian-compatible query.
"""
def __init__(self, schema, database):
self.schema = schema
self.database = database
def visit_combination(self, combination):
if combination.operation == combination.OR:
return xapian.Query(xapian.Query.OP_OR, combination.children)
elif combination.operation == combination.AND:
return xapian.Query(xapian.Query.OP_AND, combination.children)
elif combination.operation == combination.AND_NOT:
return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
elif combination.operation == combination.XOR:
return xapian.Query(xapian.Query.OP_XOR, combination.children)
return combination
def visit_not_query(self, query):
new_query = self.visit_query(query, n=True)
#NOT set is the intersection of universal set AND NOT set
new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
return new_query
def visit_query(self, query, n=False):
query_dict = query.query
qp = xapian.QueryParser()
qp.set_database(self.database)
field_prefix = {}
field_type = {}
field_col = {}
for field_dict in self.schema['idx_fields']:
fname = field_dict['field_name']
field_col[fname] = field_dict['column']
field_type[fname] = field_dict['type']
field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
pre_query = None
new_query = None
for field in query_dict:
if field in field_prefix:
prefix = field_prefix[field]
col = field_col[field]
value = query_dict[field]
if isinstance(value, dict):
ftype = field_type[field]
if ftype == 'int' or ftype == 'long':
begin = value.get('$gt', 0)
end = value.get('$lt', sys.maxint)
qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
value = ['%s%s' % (prefix, v) for v in value]
#De Morgan's laws, if we want the intersection of negation sets,
#Firstly, we obtain the disjunction of this sets, then get negation of them
# (AND_NOT [U, (OR, [a, b, c])])
# NOT (a OR B OR C)
# NOT a AND not b AND not C
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, value)
else:
new_query = xapian.Query(xapian.Query.OP_OR, value)
else:
new_query = xapian.Query('%s%s' % (prefix, value))
if pre_query:
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
else:
# and_not , [U, a or b])
# not a and not b
new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
pre_query = new_query
return new_query
class QNode(object):
"""
Base class for nodes in query trees.
"""
AND = 0
AND_NOT = 1
OR = 2
XOR = 3
NOT = 4
def to_query(self, schema, database):
'''
The query optimization is a bit harder, so we just leave the optimization of query
to user's own judgement and come back to it in the future.
'''
#query = self.accept(SimplificationVisitor())
#query = query.accept(QueryTreeTransformerVisitor())
query = self.accept(QueryCompilerVisitor(schema, database))
return query
def accept(self, visitor):
"""在to_query里被调用,不同子类有不同实现"""
raise NotImplementedError
def _combine(self, other, operation):
"""
Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty'):
return self
| return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
class QCombination(QNode):
"""
Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not self.children
def __repr__(self):
return '%s: (%s, [%s])' % \
(type(self), OPERATIONINT2STR[str(self.operation)], ', '.join([str(x) for x in self.children]))
class Q(QNode):
"""
A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not self.query
def __repr__(self):
return '%s: %s' % (type(self), self.query)
class notQ(Q):
"""
A query object based on simple query object, used in a query tree to
build up NOT query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_not_query(self)
class Schema:
v1 = {
'obj_id': '_id',
'posted_at_key': 'ts',
'idx_fields': [
{'field_name': 'uid', 'column': 0, 'type': 'long'},
{'field_name': 'name', 'column': 1, 'type': 'text'},
{'field_name': 'text', 'column': 2, 'type': 'text'},
{'field_name': 'ts', 'column': 3, 'type': 'long'}
],
}
if __name__ == "__main__":
"""
cd to test/ folder
then run 'py (-m memory_profiler) ../xapian_weibo/xapian_backend.py -d hehe'
http://pypi.python.org/pypi/memory_profiler
"""
parser = ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', help='DEBUG')
parser.add_argument('-p', '--print_folders', action='store_true', help='PRINT FOLDER THEN EXIT')
parser.add_argument('-s', '--start_time', nargs=1, help='DATETIME')
parser.add_argument('dbpath', help='PATH_TO_DATABASE')
args = parser.parse_args(sys.argv[1:])
debug = args.debug
dbpath = args.dbpath
if args.print_folders:
debug = True
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate()
for _, folder in xapian_indexer.folders_with_date:
print folder
sys.exit(0)
start_time = args.start_time[0] if args.start_time else None
if debug:
if start_time:
print 'debug mode(warning): start_time will not be used'
PROCESS_IDX_SIZE = 10000
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate(start_time)
xapian_indexer.load_and_index_weibos(start_time) | if self.empty:
return other
| random_line_split |
xapian_backend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import copy
import xapian
import cPickle as pickle
import simplejson as json
import pymongo
import scws
import datetime
import calendar
from itertools import product
from argparse import ArgumentParser
SCWS_ENCODING = 'utf-8'
SCWS_RULES = '/usr/local/scws/etc/rules.utf8.ini'
CHS_DICT_PATH = '/usr/local/scws/etc/dict.utf8.xdb'
CHT_DICT_PATH = '/usr/local/scws/etc/dict_cht.utf8.xdb'
CUSTOM_DICT_PATH = '../dict/userdic.txt'
IGNORE_PUNCTUATION = 1
EXTRA_STOPWORD_PATH = '../dict/stopword.dic'
EXTRA_EMOTIONWORD_PATH = '../dict/emotionlist.txt'
PROCESS_IDX_SIZE = 100000
SCHEMA_VERSION = 1
DOCUMENT_ID_TERM_PREFIX = 'M'
DOCUMENT_CUSTOM_TERM_PREFIX = 'X'
OPERATIONINT2STR = {
'0': 'AND',
'1': 'AND_NOT',
'2': 'OR',
'3': 'XOR',
'4': 'NOT',
}
class XapianIndex(object):
def __init__(self, dbpath, schema_version):
self.path = dbpath
self.schema = getattr(Schema, 'v%s' % schema_version, None)
self.databases = {}
self.load_scws()
self.load_mongod()
self.load_extra_dic()
def document_count(self, folder):
try:
return _database(folder).get_doccount()
except InvalidIndexError:
return 0
def generate(self, start_time=None):
folders_with_date = []
if not debug and start_time:
start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
elif debug:
start_time = datetime.datetime(2009, 8, 1)
step_time = datetime.timedelta(days=50)
while start_time < datetime.datetime.today():
folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
folders_with_date.append((start_time, folder))
start_time += step_time
self.folders_with_date = folders_with_date
def | (self):
self.emotion_words = [line.strip('\r\n') for line in file(EXTRA_EMOTIONWORD_PATH)]
def load_scws(self):
s = scws.Scws()
s.set_charset(SCWS_ENCODING)
s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)
# 把停用词全部拆成单字,再过滤掉单字,以达到去除停用词的目的
s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)
# 即基于表情表对表情进行分词,必要的时候在返回结果处或后剔除
s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)
s.set_rules(SCWS_RULES)
s.set_ignore(IGNORE_PUNCTUATION)
self.s = s
def load_mongod(self):
connection = pymongo.Connection()
db = connection.admin
db.authenticate('root', 'root')
db = connection.weibo
self.db = db
def get_database(self, folder):
if folder not in self.databases:
self.databases[folder] = _database(folder, writable=True)
return self.databases[folder]
#@profile
def load_and_index_weibos(self, start_time=None):
if not debug and start_time:
start_time = self.folders_with_date[0][0]
end_time = start_time + datetime.timedelta(days=50)
weibos = self.db.statuses.find({
self.schema['posted_at_key']: {
'$gte': calendar.timegm(start_time.timetuple()),
'$lt': calendar.timegm(end_time.timetuple())
}
}, timeout=False)
print 'prod mode: loaded weibos from mongod'
elif debug:
with open("../test/sample_tweets.js") as f:
weibos = json.loads(f.readline())
print 'debug mode: loaded weibos from file'
count = 0
try:
for weibo in weibos:
count += 1
posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
if not debug and start_time:
folder = self.folders_with_date[0][1]
elif debug:
for i in xrange(len(self.folders_with_date) - 1):
if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i][1]
break
else:
if posted_at >= self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i + 1][1]
self.update(folder, weibo)
if count % PROCESS_IDX_SIZE == 0:
print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
except Exception:
raise
finally:
for database in self.databases.itervalues():
database.close()
for _, folder in self.folders_with_date:
print 'index size', folder, self.document_count(folder)
def update(self, folder, weibo):
document = xapian.Document()
document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
for field in self.schema['idx_fields']:
self.index_field(field, document, weibo, SCHEMA_VERSION)
document.set_data(pickle.dumps(
weibo, pickle.HIGHEST_PROTOCOL
))
document.add_term(document_id)
self.get_database(folder).replace_document(document_id, document)
def index_field(self, field, document, weibo, schema_version):
prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
if schema_version == 1:
if field['field_name'] in ['uid', 'name']:
term = _marshal_term(weibo[field['field_name']])
document.add_term(prefix + term)
elif field['field_name'] == 'ts':
document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
elif field['field_name'] == 'text':
tokens = [token[0] for token
in self.s.participle(weibo[field['field_name']].encode('utf-8'))
if len(token[0]) > 1]
for token in tokens:
if len(token) <= 10:
document.add_term(prefix + token)
document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
def create(dbpath):
return xapian.Database(dbpath)
def merge(db1, db2):
db1.add_database(db2)
return db1
self.database = reduce(merge,
map(create,
[path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
self.schema = getattr(Schema, 'v%s' % schema_version, None)
def parse_query(self, query_dict):
"""
Given a `query_dict`, will attempt to return a xapian.Query
Required arguments:
``query_dict`` -- A query dict similar to MongoDB style to parse
Returns a xapian.Query
Operator Reference:
Comparison:
equal, key = value, { key:value }
$lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
Logical:
$and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
... , { <expressionN> }] }
$or, perform logical OR operation in expressions like the $and operation
$xor, perform logical XOR operation in expressions like the $and operation
$not, perform logical NOT operation in experssions, which get the conjunction of both negative
experssions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
PS: if not any operation is specified, the logical AND operation is the default operation
(An implicit AND operation is performed when specifying a comma separated list of expressions).
See more query examples in test files.
"""
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
def build_query_tree(self, query_dict):
    """Turn a MongoDB-style query dict into a Q/notQ/QCombination tree."""
    ops = ['$not']
    bi_ops = ['$or', '$and', '$xor']
    def op(a, b, operation):
        # Fold two nodes with the binary operator named by `operation`.
        if operation == '$and':
            return a & b
        elif operation == '$or':
            return a | b
        elif operation == '$xor':
            return a ^ b
        else:
            raise OperationError('Operation %s cannot be processed.' % operation)
    def grammar_tree(query_dict):
        total_query = Q()
        for k in query_dict.keys():
            if k in bi_ops:
                # Plain sub-expressions (no nested operator keys) fold directly
                bi_query = reduce(lambda a, b: op(a, b, k),
                                  map(lambda expr: Q(**expr),
                                      filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
                # Operator-bearing sub-expressions recurse first
                nested_query = reduce(lambda a, b: op(a, b, k),
                                      map(lambda query_dict: grammar_tree(query_dict),
                                          filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
                if nested_query:
                    # NOTE(review): Q defines no __nonzero__, so this branch is
                    # always taken; _combine treats an empty Q() as the identity
                    # element, so the result is correct either way.
                    total_query &= op(bi_query, nested_query, k)
                else:
                    total_query &= bi_query
            elif k in ops:
                if k == '$not':
                    not_dict = {}
                    #nested_query_dict = {}
                    for not_k in query_dict[k]:
                        if not_k not in ops + bi_ops:
                            not_dict[not_k] = query_dict[k][not_k]
                        else:
                            pass
                            # nested query in a $not statement is not implemented
                            #nested_query_dict.update({not_k: query_dict[k][not_k]})
                    not_query = notQ(**not_dict)
                    total_query &= not_query
            else:
                # Plain field condition: implicit AND with the rest
                total_query &= Q(**{k: query_dict[k]})
        return total_query
    total_query = grammar_tree(query_dict)
    return total_query
def search(self, query=None, sort_by=None, start_offset=0,
           max_offset=1000, fields=None, **kwargs):
    """Run a MongoDB-style query (see parse_query) and return a dict
    {'results': [...], 'hits': <estimated match count>}.

    `sort_by`       -- list of schema field names; '-' prefix reverses order.
    `start_offset`/`max_offset` -- paging window; a falsy max_offset means
                       "everything from start_offset to the end".
    `fields`        -- project each hit down to these keys; None returns the
                       whole stored weibo dict.
    """
    query = self.parse_query(query)
    if xapian.Query.empty(query):
        return {
            'results': [],
            'hits': 0,
        }
    database = self.database
    enquire = xapian.Enquire(database)
    enquire.set_query(query)
    if sort_by:
        sorter = xapian.MultiValueSorter()
        for sort_field in sort_by:
            if sort_field.startswith('-'):
                reverse = True
                sort_field = sort_field[1:]  # Strip the '-'
            else:
                reverse = False  # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
            sorter.add(self._value_column(sort_field), reverse)
        enquire.set_sort_by_key_then_relevance(sorter, True)
    results = []
    if not max_offset:
        max_offset = database.get_doccount() - start_offset
    matches = self._get_enquire_mset(database, enquire, start_offset, max_offset)
    for match in matches:
        # Stored document data is a pickled weibo dict (written by the indexer)
        weibo = pickle.loads(self._get_document_data(database, match.document))
        item = None
        if fields is not None:  # fields == [] deliberately yields empty items
            item = {}
            for field in fields:
                item[field] = weibo[field]
        else:
            item = weibo
        results.append(item)
    return {
        'results': results,
        'hits': self._get_hit_count(database, enquire)
    }
def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
    """Safer wrapper around enquire.get_mset: if Xapian reports that the
    database was modified concurrently, reopen it and retry once. A second
    failure propagates to the caller."""
    for retried in (False, True):
        try:
            return enquire.get_mset(start_offset, max_offset)
        except xapian.DatabaseModifiedError:
            if retried:
                raise
            database.reopen()
def _get_document_data(self, database, document):
    """Safer wrapper around document.get_data: if Xapian reports that the
    database was modified concurrently, reopen it and retry once. A second
    failure propagates to the caller."""
    for retried in (False, True):
        try:
            return document.get_data()
        except xapian.DatabaseModifiedError:
            if retried:
                raise
            database.reopen()
def _value_column(self, field):
    """Map a schema field name to its Xapian value-slot column (0-indexed).
    Unknown fields fall back to column 0 — note this aliases whichever
    real field occupies slot 0."""
    columns = (fd['column'] for fd in self.schema['idx_fields']
               if fd['field_name'] == field)
    return next(columns, 0)
def _get_hit_count(self, database, enquire):
    """Estimated number of matches for `enquire` across the whole database
    (fetches a full-size MSet and reports its size)."""
    full_mset = self._get_enquire_mset(database, enquire, 0,
                                       database.get_doccount())
    return full_mset.size()
def _marshal_value(value):
    """Convert a Python value into the form Xapian stores in a value slot;
    integers go through sortable_serialise so they collate correctly as
    strings. Everything else passes through unchanged."""
    if isinstance(value, (int, long)):
        return xapian.sortable_serialise(value)
    return value
def _marshal_term(term):
"""
Private utility method that converts Python terms to a string for Xapian terms.
"""
if isinstance(term, int):
term = str(term)
return term
def _database(folder, writable=False):
    """
    Private method that returns a xapian.Database for use.
    Optional arguments:
    ``writable`` -- Open the database in read/write mode (default=False)
    Returns an instance of a xapian.Database or xapian.WritableDatabase
    """
    if writable:
        # NOTE(review): reads the module-global `debug` flag, which is only
        # assigned in the __main__ block — confirm before importing this as
        # a library. Debug runs OVERWRITE any existing index.
        if debug:
            database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)
        else:
            database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)
    else:
        try:
            database = xapian.Database(folder)
        except xapian.DatabaseOpeningError:
            raise InvalidIndexError(u'Unable to open index at %s' % folder)
    return database
class InvalidIndexError(Exception):
    """Raised when an index can not be opened."""
class InvalidQueryError(Exception):
    """Raised when a query is illegal."""
class OperationError(Exception):
    """Raised when queries cannot be operated."""
class QNodeVisitor(object):
    """Base visitor for nodes in a Q-object query tree.

    Subclasses override the hooks below; the defaults are identity
    functions, so an unmodified visitor leaves the tree untouched.
    """

    def visit_combination(self, combination):
        """Hook invoked by QCombination nodes."""
        return combination

    def visit_query(self, query):
        """Hook invoked by Q nodes."""
        return query

    def visit_not_query(self, query):
        """Hook invoked by notQ nodes."""
        return query
class SimplificationVisitor(QNodeVisitor):
    """
    Simplifies query trees by combining unnecessary 'and' connection nodes
    into a single Q-object.
    """
    def visit_combination(self, combination):
        if combination.operation == combination.AND:
            """
            The simplification only applies to 'simple' queries:
            when the outermost operator is AND and every child is a plain Q
            (not a notQ), pull all their query dicts into a single Q.
            """
            if all(isinstance(node, Q) and not isinstance(node, notQ)
                   for node in combination.children):
                queries = [node.query for node in combination.children]
                return Q(**self._query_conjunction(queries))
        return combination

    def _query_conjunction(self, queries):
        """
        Merges query dicts - effectively &ing them together.
        """
        query_ops = set()
        combined_query = {}
        for query in queries:
            ops = set(query.keys())
            # Make sure that the same operation isn't applied more than once
            # to a single field
            intersection = ops & query_ops
            if intersection:
                msg = 'Duplicate query conditions: '
                raise InvalidQueryError(msg + ', '.join(intersection))
            query_ops.update(ops)
            # deepcopy so the caller's dicts are never aliased by the merge
            combined_query.update(copy.deepcopy(query))
        return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
    """
    Transforms the query tree in to a form that may be more effective used with Xapian.
    """
    def visit_combination(self, combination):
        if combination.operation == combination.AND:
            # Move the ORs up the tree to one 'master' $or.
            # Firstly, we must find all the necessary parts (part
            # of an AND combination or just standard Q object), and store them
            # separately from the OR parts.
            or_groups = []
            and_parts = []
            for node in combination.children:
                if isinstance(node, QCombination):
                    if node.operation == node.OR:
                        # Any of the children in an $or component may cause
                        # the query to succeed
                        or_groups.append(node.children)
                    elif node.operation == node.AND:
                        and_parts.append(node)
                elif isinstance(node, Q):
                    and_parts.append(node)
            # Now we combine the parts into a usable query. AND together all of
            # the necessary parts. Then for each $or part, create a new query
            # that ANDs the necessary part with the $or part.
            # (itertools.product yields one tuple per cross-product choice,
            # i.e. this distributes AND over OR.)
            clauses = []
            for or_group in product(*or_groups):
                q_object = reduce(lambda a, b: a & b, and_parts, Q())
                q_object = reduce(lambda a, b: a & b, or_group, q_object)
                clauses.append(q_object)
            # Finally, $or the generated clauses in to one query. Each of the
            # clauses is sufficient for the query to succeed.
            return reduce(lambda a, b: a | b, clauses, Q())
        if combination.operation == combination.OR:
            # Flatten nested ORs into a single flat child list
            children = []
            for node in combination.children:
                if (isinstance(node, QCombination) and
                        node.operation == combination.OR):
                    children += node.children
                else:
                    children.append(node)
            combination.children = children
        return combination
class QueryCompilerVisitor(QNodeVisitor):
    """
    Compiles the nodes in a query tree to a Xapian-compatible query.
    """
    def __init__(self, schema, database):
        self.schema = schema
        self.database = database

    def visit_combination(self, combination):
        # Children were already compiled to xapian.Query objects by accept();
        # wrap them in the matching Xapian operator.
        if combination.operation == combination.OR:
            return xapian.Query(xapian.Query.OP_OR, combination.children)
        elif combination.operation == combination.AND:
            return xapian.Query(xapian.Query.OP_AND, combination.children)
        elif combination.operation == combination.AND_NOT:
            return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
        elif combination.operation == combination.XOR:
            return xapian.Query(xapian.Query.OP_XOR, combination.children)
        return combination

    def visit_not_query(self, query):
        new_query = self.visit_query(query, n=True)
        # NOT set is the intersection of the universal set AND NOT the set:
        # xapian.Query('') matches everything.
        new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
        return new_query

    def visit_query(self, query, n=False):
        """Compile one Q/notQ leaf into a xapian.Query. `n` marks negated
        context, which flips AND/OR per De Morgan so the AND_NOT wrapper
        added by visit_not_query negates the whole conjunction correctly."""
        query_dict = query.query
        qp = xapian.QueryParser()
        qp.set_database(self.database)
        field_prefix = {}
        field_type = {}
        field_col = {}
        for field_dict in self.schema['idx_fields']:
            fname = field_dict['field_name']
            field_col[fname] = field_dict['column']
            field_type[fname] = field_dict['type']
            field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
        pre_query = None
        new_query = None
        for field in query_dict:
            if field in field_prefix:
                prefix = field_prefix[field]
                col = field_col[field]
                value = query_dict[field]
                if isinstance(value, dict):
                    # Range condition, e.g. {'$gt': a, '$lt': b}; only numeric
                    # fields are supported here.
                    ftype = field_type[field]
                    if ftype == 'int' or ftype == 'long':
                        begin = value.get('$gt', 0)
                        end = value.get('$lt', sys.maxint)
                        qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
                        new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
                elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
                    # Non-string sequence of values for one field
                    value = ['%s%s' % (prefix, v) for v in value]
                    # De Morgan's laws: for the intersection of negated sets,
                    # take the disjunction first, then negate the whole:
                    #   (AND_NOT [U, (OR, [a, b, c])])
                    #   == NOT (a OR b OR c)
                    #   == NOT a AND NOT b AND NOT c
                    if not n:
                        new_query = xapian.Query(xapian.Query.OP_AND, value)
                    else:
                        new_query = xapian.Query(xapian.Query.OP_OR, value)
                else:
                    # Single exact term
                    new_query = xapian.Query('%s%s' % (prefix, value))
                if pre_query:
                    if not n:
                        new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
                    else:
                        # (AND_NOT [U, a OR b]) == NOT a AND NOT b
                        new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
                pre_query = new_query
        return new_query
class QNode(object):
    """Base class for nodes in query trees.

    Operation codes are shared with QCombination and mapped to Xapian
    operators by QueryCompilerVisitor.
    """

    AND = 0
    AND_NOT = 1
    OR = 2
    XOR = 3
    NOT = 4

    def to_query(self, schema, database):
        """Compile this tree into a xapian.Query.

        The simplification/transformation passes exist but are disabled:
        query optimization is currently left to the caller's judgement.
        """
        #query = self.accept(SimplificationVisitor())
        #query = query.accept(QueryTreeTransformerVisitor())
        return self.accept(QueryCompilerVisitor(schema, database))

    def accept(self, visitor):
        """Visitor dispatch; each subclass routes to its own hook."""
        raise NotImplementedError

    def _combine(self, other, operation):
        """Join two nodes under `operation`, dropping empty operands so that
        the empty Q() acts as the identity element.

        (The original wrote `getattr(other, 'empty')` with no default, which
        is just attribute access spelled obscurely.)
        """
        if other.empty:
            return self
        if self.empty:
            return other
        return QCombination(operation, [self, other])

    @property
    def empty(self):
        # Base nodes are never empty; Q and QCombination override this.
        return False

    def __or__(self, other):
        return self._combine(other, self.OR)

    def __and__(self, other):
        return self._combine(other, self.AND)

    def __xor__(self, other):
        return self._combine(other, self.XOR)
class QCombination(QNode):
    """A logical combination (AND/OR/XOR/...) of child query nodes."""

    def __init__(self, operation, children):
        self.operation = operation
        self.children = []
        for child in children:
            if isinstance(child, QCombination) and child.operation == operation:
                # Same-operator child combinations are flattened into this
                # node rather than nested.
                self.children.extend(child.children)
            else:
                self.children.append(child)

    def accept(self, visitor):
        # Visit the children in place first, then the combination itself.
        for i, child in enumerate(self.children):
            if isinstance(child, QNode):
                self.children[i] = child.accept(visitor)
        return visitor.visit_combination(self)

    @property
    def empty(self):
        """True when the combination has no children."""
        return len(self.children) == 0

    def __repr__(self):
        op_name = OPERATIONINT2STR[str(self.operation)]
        kids = ', '.join(str(child) for child in self.children)
        return '%s: (%s, [%s])' % (type(self), op_name, kids)
class Q(QNode):
    """A leaf query node wrapping keyword conditions; combinable into
    larger trees via the &, | and ^ operators inherited from QNode."""

    def __init__(self, **query):
        self.query = query

    def accept(self, visitor):
        return visitor.visit_query(self)

    @property
    def empty(self):
        """True when no conditions were given (the identity query)."""
        return len(self.query) == 0

    def __repr__(self):
        return '%s: %s' % (type(self), self.query)
class notQ(Q):
    """A negated leaf query node; compiled through visit_not_query.

    Inherits Q's constructor, `empty` and `__repr__` — the original
    re-declared an identical __init__, which was redundant.
    """

    def accept(self, visitor):
        return visitor.visit_not_query(self)
class Schema:
    # Version 1 of the index layout.
    # 'obj_id'        -- weibo dict key used to build the unique document id term
    # 'posted_at_key' -- weibo dict key holding the posted-at timestamp
    # 'idx_fields'    -- indexed fields; 'column' is the Xapian value slot
    v1 = {
        'obj_id': '_id',
        'posted_at_key': 'ts',
        'idx_fields': [
            {'field_name': 'uid', 'column': 0, 'type': 'long'},
            {'field_name': 'name', 'column': 1, 'type': 'text'},
            {'field_name': 'text', 'column': 2, 'type': 'text'},
            {'field_name': 'ts', 'column': 3, 'type': 'long'}
        ],
    }
if __name__ == "__main__":
    """
    cd to test/ folder
    then run 'py (-m memory_profiler) ../xapian_weibo/xapian_backend.py -d hehe'
    http://pypi.python.org/pypi/memory_profiler
    """
    parser = ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true', help='DEBUG')
    parser.add_argument('-p', '--print_folders', action='store_true', help='PRINT FOLDER THEN EXIT')
    parser.add_argument('-s', '--start_time', nargs=1, help='DATETIME')
    parser.add_argument('dbpath', help='PATH_TO_DATABASE')
    args = parser.parse_args(sys.argv[1:])
    # Module-global flag: read by XapianIndex.generate/load_and_index_weibos
    # and by _database()
    debug = args.debug
    dbpath = args.dbpath
    if args.print_folders:
        # Listing folders implies debug so generate() produces the date ladder
        debug = True
        xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
        xapian_indexer.generate()
        for _, folder in xapian_indexer.folders_with_date:
            print folder
        sys.exit(0)
    start_time = args.start_time[0] if args.start_time else None
    if debug:
        if start_time:
            print 'debug mode(warning): start_time will not be used'
        # Smaller progress-report interval for debug runs
        PROCESS_IDX_SIZE = 10000
    xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
    xapian_indexer.generate(start_time)
    xapian_indexer.load_and_index_weibos(start_time)
| load_extra_dic | identifier_name |
xapian_backend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import copy
import xapian
import cPickle as pickle
import simplejson as json
import pymongo
import scws
import datetime
import calendar
from itertools import product
from argparse import ArgumentParser
# --- scws (Chinese word segmenter) configuration ---
SCWS_ENCODING = 'utf-8'
SCWS_RULES = '/usr/local/scws/etc/rules.utf8.ini'
CHS_DICT_PATH = '/usr/local/scws/etc/dict.utf8.xdb'      # simplified Chinese dictionary
CHT_DICT_PATH = '/usr/local/scws/etc/dict_cht.utf8.xdb'  # traditional Chinese dictionary
CUSTOM_DICT_PATH = '../dict/userdic.txt'
IGNORE_PUNCTUATION = 1
EXTRA_STOPWORD_PATH = '../dict/stopword.dic'
EXTRA_EMOTIONWORD_PATH = '../dict/emotionlist.txt'
# Progress-report interval while indexing (lowered to 10000 in debug runs)
PROCESS_IDX_SIZE = 100000
SCHEMA_VERSION = 1
# Xapian term prefixes: 'M' + obj_id for the unique document id term,
# 'X' + FIELDNAME for field-scoped terms
DOCUMENT_ID_TERM_PREFIX = 'M'
DOCUMENT_CUSTOM_TERM_PREFIX = 'X'
# Maps QNode operation codes (as strings) to readable names for __repr__
OPERATIONINT2STR = {
    '0': 'AND',
    '1': 'AND_NOT',
    '2': 'OR',
    '3': 'XOR',
    '4': 'NOT',
}
class XapianIndex(object):
    """Builds date-partitioned Xapian index folders of weibo documents,
    sourced from MongoDB in production or from a JSON sample file in debug
    mode.

    NOTE(review): depends on a module-global `debug` flag that is only
    assigned in the __main__ block — confirm before importing this class
    from elsewhere.
    """
    def __init__(self, dbpath, schema_version):
        self.path = dbpath
        # Schema is looked up by version name ('v1', ...); None if unknown
        self.schema = getattr(Schema, 'v%s' % schema_version, None)
        self.databases = {}  # cache: folder name -> writable database
        self.load_scws()
        self.load_mongod()
        self.load_extra_dic()

    def document_count(self, folder):
        # Number of documents in one index folder; 0 if it cannot be opened.
        try:
            return _database(folder).get_doccount()
        except InvalidIndexError:
            return 0

    def generate(self, start_time=None):
        """Populate self.folders_with_date with (start datetime, folder name)
        pairs: one 50-day bucket per folder in debug mode, or a single folder
        for `start_time` ('%Y-%m-%d' string) in production."""
        folders_with_date = []
        if not debug and start_time:
            start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d')
            folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
            folders_with_date.append((start_time, folder))
        elif debug:
            start_time = datetime.datetime(2009, 8, 1)
            step_time = datetime.timedelta(days=50)
            while start_time < datetime.datetime.today():
                folder = "_%s_%s" % (self.path, start_time.strftime('%Y-%m-%d'))
                folders_with_date.append((start_time, folder))
                start_time += step_time
        self.folders_with_date = folders_with_date

    def load_extra_dic(self):
        # Emotion word list, one word per line
        self.emotion_words = [line.strip('\r\n') for line in file(EXTRA_EMOTIONWORD_PATH)]

    def load_scws(self):
        """Configure the scws Chinese word segmenter used by index_field."""
        s = scws.Scws()
        s.set_charset(SCWS_ENCODING)
        s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)
        s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)
        s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)
        # Stopwords are split into single characters, which are then filtered
        # out downstream — this is how stopword removal is achieved.
        s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)
        # Segment emoticons against the emotion word table; strip them from
        # results afterwards where necessary.
        s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)
        s.set_rules(SCWS_RULES)
        s.set_ignore(IGNORE_PUNCTUATION)
        self.s = s

    def load_mongod(self):
        # NOTE(review): hard-coded credentials; consider externalizing
        connection = pymongo.Connection()
        db = connection.admin
        db.authenticate('root', 'root')
        db = connection.weibo
        self.db = db

    def get_database(self, folder):
        # Lazily open (and cache) one writable database per folder
        if folder not in self.databases:
            self.databases[folder] = _database(folder, writable=True)
        return self.databases[folder]

    #@profile
    def load_and_index_weibos(self, start_time=None):
        """Stream weibos (MongoDB in prod, sample file in debug) into their
        date-bucket folders; always closes the writable databases at the end
        and prints per-folder index sizes."""
        if not debug and start_time:
            # Prod: re-read the window computed by generate() and query Mongo
            start_time = self.folders_with_date[0][0]
            end_time = start_time + datetime.timedelta(days=50)
            weibos = self.db.statuses.find({
                self.schema['posted_at_key']: {
                    '$gte': calendar.timegm(start_time.timetuple()),
                    '$lt': calendar.timegm(end_time.timetuple())
                }
            }, timeout=False)
            print 'prod mode: loaded weibos from mongod'
        elif debug:
            with open("../test/sample_tweets.js") as f:
                weibos = json.loads(f.readline())
            print 'debug mode: loaded weibos from file'
        count = 0
        try:
            for weibo in weibos:
                count += 1
                posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
                if not debug and start_time:
                    folder = self.folders_with_date[0][1]
                elif debug:
                    # Locate the 50-day bucket containing posted_at
                    for i in xrange(len(self.folders_with_date) - 1):
                        if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
                            folder = self.folders_with_date[i][1]
                            break
                    else:
                        # Past the last boundary -> last folder
                        if posted_at >= self.folders_with_date[i + 1][0]:
                            folder = self.folders_with_date[i + 1][1]
                # NOTE(review): if posted_at predates the first bucket,
                # `folder` silently keeps its previous value — verify intended
                self.update(folder, weibo)
                if count % PROCESS_IDX_SIZE == 0:
                    print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
        except Exception:
            raise
        finally:
            for database in self.databases.itervalues():
                database.close()
            for _, folder in self.folders_with_date:
                print 'index size', folder, self.document_count(folder)

    def update(self, folder, weibo):
        """Index one weibo dict as a Xapian document, replacing any previous
        document carrying the same unique id term."""
        document = xapian.Document()
        document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
        for field in self.schema['idx_fields']:
            self.index_field(field, document, weibo, SCHEMA_VERSION)
        # The whole weibo dict is stored pickled; search() unpickles it
        document.set_data(pickle.dumps(
            weibo, pickle.HIGHEST_PROTOCOL
        ))
        document.add_term(document_id)
        self.get_database(folder).replace_document(document_id, document)

    def index_field(self, field, document, weibo, schema_version):
        """Add one schema field of `weibo` to `document` as prefixed terms
        and/or a value-slot entry, per the v1 schema rules."""
        prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
        if schema_version == 1:
            if field['field_name'] in ['uid', 'name']:
                term = _marshal_term(weibo[field['field_name']])
                document.add_term(prefix + term)
            elif field['field_name'] == 'ts':
                document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
            elif field['field_name'] == 'text':
                # Segment the text; keep multi-character tokens only (drops the
                # single-character stopword residue)
                tokens = [token[0] for token
                          in self.s.participle(weibo[field['field_name']].encode('utf-8'))
                          if len(token[0]) > 1]
                for token in tokens:
                    # Cap term length at 10 characters
                    if len(token) <= 10:
                        document.add_term(prefix + token)
                document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
    """Read-only search interface over the index folders produced by
    XapianIndex: merges every '_<name>*' folder under `path` into a single
    xapian.Database and answers MongoDB-style dict queries."""

    def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
        def create(dbpath):
            return xapian.Database(dbpath)
        def merge(db1, db2):
            # add_database() stitches the shards into one searchable view
            db1.add_database(db2)
            return db1
        # NOTE(review): reduce() raises a cryptic TypeError when no folder
        # matches — verify the path before constructing
        self.database = reduce(merge,
                               map(create,
                                   [path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
        self.schema = getattr(Schema, 'v%s' % schema_version, None)

    def parse_query(self, query_dict):
        """
        Given a `query_dict`, will attempt to return a xapian.Query
        Required arguments:
        ``query_dict`` -- A query dict similar to MongoDB style to parse
        Returns a xapian.Query
        Operator Reference:
        Comparison:
        equal, key = value, { key:value }
        $lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
        Logical:
        $and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
        ... , { <expressionN> }] }
        $or, perform logical OR operation in expressions like the $and operation
        $xor, perform logical XOR operation in expressions like the $and operation
        $not, perform logical NOT operation in expressions, which get the conjunction of both negative
        expressions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
        PS: if not any operation is specified, the logical AND operation is the default operation
        (An implicit AND operation is performed when specifying a comma separated list of expressions).
        See more query examples in test files.
        """
        if query_dict is None:
            return xapian.Query('')  # Match everything
        elif query_dict == {}:
            return xapian.Query()  # Match nothing
        query_tree = self.build_query_tree(query_dict)
        return query_tree.to_query(self.schema, self.database)

    def build_query_tree(self, query_dict):
        """Turn a MongoDB-style query dict into a Q/notQ/QCombination tree."""
        ops = ['$not']
        bi_ops = ['$or', '$and', '$xor']
        def op(a, b, operation):
            # Fold two nodes with the binary operator named by `operation`.
            if operation == '$and':
                return a & b
            elif operation == '$or':
                return a | b
            elif operation == '$xor':
                return a ^ b
            else:
                raise OperationError('Operation %s cannot be processed.' % operation)
        def grammar_tree(query_dict):
            total_query = Q()
            for k in query_dict.keys():
                if k in bi_ops:
                    # Plain sub-expressions (no nested operator keys) fold directly
                    bi_query = reduce(lambda a, b: op(a, b, k),
                                      map(lambda expr: Q(**expr),
                                          filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
                    # Operator-bearing sub-expressions recurse first
                    nested_query = reduce(lambda a, b: op(a, b, k),
                                          map(lambda query_dict: grammar_tree(query_dict),
                                              filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
                    if nested_query:
                        # NOTE(review): Q defines no __nonzero__, so this branch
                        # is always taken; _combine treats empty Q() as identity,
                        # so the result stays correct either way.
                        total_query &= op(bi_query, nested_query, k)
                    else:
                        total_query &= bi_query
                elif k in ops:
                    if k == '$not':
                        not_dict = {}
                        #nested_query_dict = {}
                        for not_k in query_dict[k]:
                            if not_k not in ops + bi_ops:
                                not_dict[not_k] = query_dict[k][not_k]
                            else:
                                pass
                                # nested query in a $not statement is not implemented
                                #nested_query_dict.update({not_k: query_dict[k][not_k]})
                        not_query = notQ(**not_dict)
                        total_query &= not_query
                else:
                    # Plain field condition: implicit AND with the rest
                    total_query &= Q(**{k: query_dict[k]})
            return total_query
        total_query = grammar_tree(query_dict)
        return total_query

    def search(self, query=None, sort_by=None, start_offset=0,
               max_offset=1000, fields=None, **kwargs):
        """Run a MongoDB-style query and return
        {'results': [...], 'hits': <estimated match count>}.
        A falsy `max_offset` means "everything from start_offset on";
        `fields` projects each hit down to those keys (None = whole dict)."""
        query = self.parse_query(query)
        if xapian.Query.empty(query):
            return {
                'results': [],
                'hits': 0,
            }
        database = self.database
        enquire = xapian.Enquire(database)
        enquire.set_query(query)
        if sort_by:
            sorter = xapian.MultiValueSorter()
            for sort_field in sort_by:
                if sort_field.startswith('-'):
                    reverse = True
                    sort_field = sort_field[1:]  # Strip the '-'
                else:
                    reverse = False  # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
                sorter.add(self._value_column(sort_field), reverse)
            enquire.set_sort_by_key_then_relevance(sorter, True)
        results = []
        if not max_offset:
            max_offset = database.get_doccount() - start_offset
        matches = self._get_enquire_mset(database, enquire, start_offset, max_offset)
        for match in matches:
            # Stored document data is the pickled weibo dict from the indexer
            weibo = pickle.loads(self._get_document_data(database, match.document))
            item = None
            if fields is not None:  # fields == [] deliberately yields empty items
                item = {}
                for field in fields:
                    item[field] = weibo[field]
            else:
                item = weibo
            results.append(item)
        return {
            'results': results,
            'hits': self._get_hit_count(database, enquire)
        }

    def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
        """
        A safer version of Xapian.enquire.get_mset
        Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
        attempting a `database.reopen` as needed.
        Required arguments:
        `database` -- The database to be read
        `enquire` -- An instance of an Xapian.enquire object
        `start_offset` -- The start offset to pass to `enquire.get_mset`
        `max_offset` -- The max offset (maxitems to acquire) to pass to `enquire.get_mset`
        """
        try:
            return enquire.get_mset(start_offset, max_offset)
        except xapian.DatabaseModifiedError:
            # Retry once after reopening; a second failure propagates
            database.reopen()
            return enquire.get_mset(start_offset, max_offset)

    def _get_document_data(self, database, document):
        """
        A safer version of Xapian.document.get_data
        Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
        attempting a `database.reopen` as needed.
        Required arguments:
        `database` -- The database to be read
        `document` -- An instance of an Xapian.document object
        """
        try:
            return document.get_data()
        except xapian.DatabaseModifiedError:
            # Retry once after reopening; a second failure propagates
            database.reopen()
            return document.get_data()

    def _value_column(self, field):
        """
        Private method that returns the column value slot in the database
        for a given field.
        Required arguments:
        `field` -- The field to lookup
        Returns an integer with the column location (0 indexed).
        Unknown fields fall back to 0, which aliases the real column 0.
        """
        for field_dict in self.schema['idx_fields']:
            if field_dict['field_name'] == field:
                return field_dict['column']
        return 0

    def _get_hit_count(self, database, enquire):
        """
        Given a database and enquire instance, returns the estimated number
        of matches.
        Required arguments:
        `database` -- The database to be queried
        `enquire` -- The enquire instance
        """
        return self._get_enquire_mset(
            database, enquire, 0, database.get_doccount()
        ).size()
def _marshal_value(value):
    """Convert a Python value into the form Xapian stores in a value slot;
    integers go through sortable_serialise so they collate correctly as
    strings. Everything else passes through unchanged."""
    if isinstance(value, (int, long)):
        return xapian.sortable_serialise(value)
    return value
def _marshal_term(term):
"""
Private utility method that converts Python terms to a string for Xapian terms.
"""
if isinstance(term, int):
term = str(term)
return term
def _database(folder, writable=False):
    """
    Private method that returns a xapian.Database for use.
    Optional arguments:
    ``writable`` -- Open the database in read/write mode (default=False)
    Returns an instance of a xapian.Database or xapian.WritableDatabase
    """
    if writable:
        # NOTE(review): reads the module-global `debug` flag, assigned only
        # in the __main__ block. Debug runs OVERWRITE any existing index.
        if debug:
            database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)
        else:
            database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)
    else:
        try:
            database = xapian.Database(folder)
        except xapian.DatabaseOpeningError:
            raise InvalidIndexError(u'Unable to open index at %s' % folder)
    return database
class InvalidIndexError(Exception):
    """Raised when an index can not be opened."""
class InvalidQueryError(Exception):
    """Raised when a query is illegal."""
class OperationError(Exception):
    """Raised when queries cannot be operated."""
class QNodeVisitor(object):
    """Base visitor for nodes in a Q-object query tree.

    Subclasses override the hooks below; the defaults are identity
    functions, so an unmodified visitor leaves the tree untouched.
    """

    def visit_combination(self, combination):
        """Hook invoked by QCombination nodes."""
        return combination

    def visit_query(self, query):
        """Hook invoked by Q nodes."""
        return query

    def visit_not_query(self, query):
        """Hook invoked by notQ nodes."""
        return query
class SimplificationVisitor(QNodeVisitor):
    """
    Simplifies query trees by combining unnecessary 'and' connection nodes
    into a single Q-object.
    """
    def visit_combination(self, combination):
        if combination.operation == combination.AND:
            """
            The simplification only applies to 'simple' queries:
            when the outermost operator is AND and every child is a plain Q
            (not a notQ), pull all their query dicts into a single Q.
            """
            if all(isinstance(node, Q) and not isinstance(node, notQ)
                   for node in combination.children):
                queries = [node.query for node in combination.children]
                return Q(**self._query_conjunction(queries))
        return combination

    def _query_conjunction(self, queries):
        """
        Merges query dicts - effectively &ing them together.
        """
        query_ops = set()
        combined_query = {}
        for query in queries:
            ops = set(query.keys())
            # Make sure that the same operation isn't applied more than once
            # to a single field
            intersection = ops & query_ops
            if intersection:
                msg = 'Duplicate query conditions: '
                raise InvalidQueryError(msg + ', '.join(intersection))
            query_ops.update(ops)
            # deepcopy so the caller's dicts are never aliased by the merge
            combined_query.update(copy.deepcopy(query))
        return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
    """
    Transforms the query tree in to a form that may be more effective used with Xapian.
    """
    def visit_combination(self, combination):
        if combination.operation == combination.AND:
            # Move the ORs up the tree to one 'master' $or.
            # Firstly, we must find all the necessary parts (part
            # of an AND combination or just standard Q object), and store them
            # separately from the OR parts.
            or_groups = []
            and_parts = []
            for node in combination.children:
                if isinstance(node, QCombination):
                    if node.operation == node.OR:
                        # Any of the children in an $or component may cause
                        # the query to succeed
                        or_groups.append(node.children)
                    elif node.operation == node.AND:
                        and_parts.append(node)
                elif isinstance(node, Q):
                    and_parts.append(node)
            # Now we combine the parts into a usable query. AND together all of
            # the necessary parts. Then for each $or part, create a new query
            # that ANDs the necessary part with the $or part.
            # (itertools.product distributes AND over OR: one clause per
            # cross-product choice.)
            clauses = []
            for or_group in product(*or_groups):
                q_object = reduce(lambda a, b: a & b, and_parts, Q())
                q_object = reduce(lambda a, b: a & b, or_group, q_object)
                clauses.append(q_object)
            # Finally, $or the generated clauses in to one query. Each of the
            # clauses is sufficient for the query to succeed.
            return reduce(lambda a, b: a | b, clauses, Q())
        if combination.operation == combination.OR:
            # Flatten nested ORs into a single flat child list
            children = []
            for node in combination.children:
                if (isinstance(node, QCombination) and
                        node.operation == combination.OR):
                    children += node.children
                else:
                    children.append(node)
            combination.children = children
        return combination
class QueryCompilerVisitor(QNodeVisitor):
    """
    Compiles the nodes in a query tree to a Xapian-compatible query.
    """
    def __init__(self, schema, database):
        self.schema = schema
        self.database = database

    def visit_combination(self, combination):
        # Children were already compiled to xapian.Query objects by accept();
        # wrap them in the matching Xapian operator.
        if combination.operation == combination.OR:
            return xapian.Query(xapian.Query.OP_OR, combination.children)
        elif combination.operation == combination.AND:
            return xapian.Query(xapian.Query.OP_AND, combination.children)
        elif combination.operation == combination.AND_NOT:
            return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
        elif combination.operation == combination.XOR:
            return xapian.Query(xapian.Query.OP_XOR, combination.children)
        return combination

    def visit_not_query(self, query):
        new_query = self.visit_query(query, n=True)
        # NOT set is the intersection of the universal set AND NOT the set:
        # xapian.Query('') matches everything.
        new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
        return new_query

    def visit_query(self, query, n=False):
        """Compile one Q/notQ leaf into a xapian.Query. `n` marks negated
        context, which flips AND/OR per De Morgan so the AND_NOT wrapper
        added by visit_not_query negates the whole conjunction correctly."""
        query_dict = query.query
        qp = xapian.QueryParser()
        qp.set_database(self.database)
        field_prefix = {}
        field_type = {}
        field_col = {}
        for field_dict in self.schema['idx_fields']:
            fname = field_dict['field_name']
            field_col[fname] = field_dict['column']
            field_type[fname] = field_dict['type']
            field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
        pre_query = None
        new_query = None
        for field in query_dict:
            if field in field_prefix:
                prefix = field_prefix[field]
                col = field_col[field]
                value = query_dict[field]
                if isinstance(value, dict):
                    # Range condition, e.g. {'$gt': a, '$lt': b}; only numeric
                    # fields are supported here.
                    ftype = field_type[field]
                    if ftype == 'int' or ftype == 'long':
                        begin = value.get('$gt', 0)
                        end = value.get('$lt', sys.maxint)
                        qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
                        new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
                elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
                    # Non-string sequence of values for one field
                    value = ['%s%s' % (prefix, v) for v in value]
                    # De Morgan's laws: for the intersection of negated sets,
                    # take the disjunction first, then negate the whole:
                    #   (AND_NOT [U, (OR, [a, b, c])])
                    #   == NOT (a OR b OR c)
                    #   == NOT a AND NOT b AND NOT c
                    if not n:
                        new_query = xapian.Query(xapian.Query.OP_AND, value)
                    else:
                        new_query = xapian.Query(xapian.Query.OP_OR, value)
                else:
                    # Single exact term
                    new_query = xapian.Query('%s%s' % (prefix, value))
                if pre_query:
                    if not n:
                        new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
                    else:
                        # (AND_NOT [U, a OR b]) == NOT a AND NOT b
                        new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
                pre_query = new_query
        return new_query
class QNode(object):
"""
Base class for nodes in query trees.
"""
AND = 0
AND_NOT = 1
OR = 2
XOR = 3
NOT = 4
def to_query(self, schema, database):
'''
The query optimization is a bit harder, so we just leave the optimization of query
to user's own judgement and come back to it in the future.
'''
#query = self.accept(SimplificationVisitor())
#query = query.accept(QueryTreeTransformerVisitor())
query = self.accept(QueryCompilerVisitor(schema, database))
return query
def accept(self, visitor):
"""在to_query里被调用,不同子类有不同实现"""
raise NotImplementedError
def _combine(self, other, operation):
"""
Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty'):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
class QCombination(QNode):
"""
Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not self.children
def __repr__(self):
return '%s: (%s, [%s])' % \
(type(self), OPERATIONINT2STR[str(self.operation)], ', '.join([str(x) for x in self.children]))
class Q(QNode):
"""
A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not self.query
def __repr__(self):
return '%s: %s' % (type(self), self.query)
class notQ(Q):
"""
A query object based on simple query object, used in a query tree to
build up NOT query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_not_query(self)
class Schema:
v1 = {
'obj_id': '_id',
'posted_at_key': 'ts',
'idx_fields': [
{'field_name': 'uid', 'column': 0, 'type': 'long'},
{'field_name': 'name', 'column': 1, 'type': 'text'},
{'field_name': 'text', 'column': 2, 'type': 'text'},
{'field_name': 'ts', 'column': 3, 'type': 'long'}
],
}
if __name__ == "__main__":
"""
cd to test/ folder
then run 'py (-m memory_profiler) ../xapian_weibo/xapian_backend.py -d hehe'
http://pypi.python.org/pypi/memory_profiler
"""
parser = ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', help='DEBUG')
parser.a | dd_argument('-p', '--print_folders', action='store_true', help='PRINT FOLDER THEN EXIT')
parser.add_argument('-s', '--start_time', nargs=1, help='DATETIME')
parser.add_argument('dbpath', help='PATH_TO_DATABASE')
args = parser.parse_args(sys.argv[1:])
debug = args.debug
dbpath = args.dbpath
if args.print_folders:
debug = True
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate()
for _, folder in xapian_indexer.folders_with_date:
print folder
sys.exit(0)
start_time = args.start_time[0] if args.start_time else None
if debug:
if start_time:
print 'debug mode(warning): start_time will not be used'
PROCESS_IDX_SIZE = 10000
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate(start_time)
xapian_indexer.load_and_index_weibos(start_time)
| conditional_block | |
y.go | //line numbers.y:2
package main
import __yyfmt__ "fmt"
//line numbers.y:2
//line numbers.y:5
type yySymType struct {
yys int
expr *Expr
exprlist []*Expr
stmt *Stmt
stmtlist []*Stmt
tok int
op Op
line Line
}
const _LE = 57346
const _GE = 57347
const _EQ = 57348
const _NE = 57349
const _UNARYMINUS = 57350
const _EQQ = 57351
const _EXPR = 57352
const _MAX = 57353
const _MIN = 57354
const _USE = 57355
const _PRINT = 57356
const _CHECK = 57357
const _COND = 57358
const _INCLUDE = 57359
var yyToknames = []string{
" <",
"_LE",
" >",
"_GE",
"_EQ",
"_NE",
" +",
" -",
" *",
" /",
"_UNARYMINUS",
" =",
"_EQQ",
"_EXPR",
"_MAX",
"_MIN",
"_USE",
"_PRINT",
"_CHECK",
"_COND",
"_INCLUDE",
}
var yyStatenames = []string{}
const yyEofCode = 1
const yyErrCode = 2
const yyMaxDepth = 200
//line numbers.y:101
//line yacctab:1
var yyExca = []int{
-1, 1,
1, -1,
-2, 0,
}
const yyNprod = 30
const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
const yyLast = 150
var yyAct = []int{
24, 19, 66, 44, 65, 44, 23, 17, 20, 21,
42, 16, 41, 22, 46, 44, 18, 40, 15, 38,
39, 25, 26, 45, 44, 43, 44, 14, 13, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 64,
63, 61, 1, 60, 10, 62, 3, 58, 59, 32,
33, 34, 35, 36, 37, 28, 29, 30, 31, 6,
30, 31, 7, 8, 9, 2, 5, 4, 68, 0,
70, 11, 12, 69, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 28, 29, 30, 31, 67, 0,
0, 0, 0, 0, 0, 0, 0, 71, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 0, 0,
0, 57, 0, 0, 0, 0, 0, 0, 0, 27,
32, 33, 34, 35, 36, 37, 28, 29, 30, 31,
}
var yyPact = []int{
-1000, -1000, 42, -1000, -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int | {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yychar { /* valid shift */
yychar = -1
yyVAL = yylval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
/* look through exception table */
xi := 0
for {
if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
yyn = yyExca[xi+0]
if yyn < 0 || yyn == yychar {
break
}
}
yyn = yyExca[xi+1]
if yyn < 0 {
goto ret0
}
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
yylex.Error("syntax error")
Nerrs++
if yyDebug >= 1 {
__yyfmt__.Printf("%s", yyStatname(yystate))
__yyfmt__.Printf("saw %s\n", yyTokname(yychar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
yyn = yyPact[yyS[yyp].yys] + yyErrCode
if yyn >= 0 && yyn < yyLast {
yystate = yyAct[yyn] /* simulate a shift of "error" */
if yyChk[yystate] == yyErrCode {
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
}
if yychar == yyEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
/* reduction by production yyn */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
}
yynt := yyn
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= yyR2[yyn]
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
yyn = yyR1[yyn]
yyg := yyPgo[yyn]
yyj := yyg + yyS[yyp].yys + 1
if yyj >= yyLast {
yystate = yyAct[yyg]
} else {
yystate = yyAct[yyj]
if yyChk[yystate] != -yyn {
yystate = yyAct[yyg]
}
}
// dummy call; replaced with literal code
switch yynt {
case 1:
//line numbers.y:35
{
prog = append(prog, yyS[yypt-0].stmtlist...)
}
case 2:
//line numbers.y:38
{
yyVAL.stmtlist = nil
}
case 3:
//line numbers.y:40
{
yyVAL.stmtlist = append(yyS[yypt-1].stmtlist, yyS[yypt-0].stmt)
}
case 4:
//line numbers.y:42
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 5:
//line numbers.y:44
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 6:
//line numbers.y:48
{
yyVAL.stmt = &Stmt{line: yyS[yypt-3].expr.line, left: yyS[yypt-3].expr, op: yyS[yypt-2].op, right: yyS[yypt-1].expr}
}
case 7:
//line numbers.y:50
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opUse, list: yyS[yypt-2].exprlist}
}
case 8:
//line numbers.y:52
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opPrint, list: yyS[yypt-2].exprlist}
}
case 9:
//line numbers.y:54
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opCheck, list: yyS[yypt-2].exprlist}
}
case 10:
//line numbers.y:58
{
yyVAL.op = opAssign
}
case 11:
//line numbers.y:60
{
yyVAL.op = opAssignWeak
}
case 12:
yyVAL.expr = yyS[yypt-0].expr
case 13:
//line numbers.y:65
{
yyVAL.expr = yyS[yypt-1].expr
}
case 14:
//line numbers.y:67
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opAdd, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 15:
//line numbers.y:69
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opSub, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 16:
//line numbers.y:71
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opMul, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 17:
//line numbers.y:73
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opDiv, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 18:
//line numbers.y:75
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLess, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 19:
//line numbers.y:77
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLessEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 20:
//line numbers.y:79
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreater, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 21:
//line numbers.y:81
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreaterEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 22:
//line numbers.y:83
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 23:
//line numbers.y:85
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opNotEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 24:
//line numbers.y:87
{
yyVAL.expr = &Expr{line: yyS[yypt-1].line, op: opMinus, left: yyS[yypt-0].expr}
}
case 25:
//line numbers.y:89
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMax, list: yyS[yypt-1].exprlist}
}
case 26:
//line numbers.y:91
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMin, list: yyS[yypt-1].exprlist}
}
case 27:
//line numbers.y:93
{
yyVAL.expr = &Expr{line: yyS[yypt-7].line, op: opCond, list: []*Expr{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}}
}
case 28:
//line numbers.y:97
{
yyVAL.exprlist = []*Expr{yyS[yypt-0].expr}
}
case 29:
//line numbers.y:99
{
yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
}
}
goto yystack /* stack new state and value */
} | identifier_body | |
y.go | //line numbers.y:2
package main
import __yyfmt__ "fmt"
//line numbers.y:2
//line numbers.y:5
type yySymType struct {
yys int
expr *Expr
exprlist []*Expr
stmt *Stmt
stmtlist []*Stmt
tok int
op Op
line Line
}
const _LE = 57346
const _GE = 57347
const _EQ = 57348
const _NE = 57349
const _UNARYMINUS = 57350
const _EQQ = 57351
const _EXPR = 57352
const _MAX = 57353
const _MIN = 57354
const _USE = 57355
const _PRINT = 57356
const _CHECK = 57357
const _COND = 57358
const _INCLUDE = 57359
var yyToknames = []string{
" <",
"_LE",
" >",
"_GE",
"_EQ",
"_NE",
" +",
" -",
" *",
" /",
"_UNARYMINUS",
" =",
"_EQQ",
"_EXPR",
"_MAX",
"_MIN",
"_USE",
"_PRINT",
"_CHECK",
"_COND",
"_INCLUDE",
}
var yyStatenames = []string{}
const yyEofCode = 1
const yyErrCode = 2
const yyMaxDepth = 200
//line numbers.y:101
//line yacctab:1
var yyExca = []int{
-1, 1,
1, -1,
-2, 0,
}
const yyNprod = 30
const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
const yyLast = 150
var yyAct = []int{
24, 19, 66, 44, 65, 44, 23, 17, 20, 21,
42, 16, 41, 22, 46, 44, 18, 40, 15, 38,
39, 25, 26, 45, 44, 43, 44, 14, 13, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 64,
63, 61, 1, 60, 10, 62, 3, 58, 59, 32,
33, 34, 35, 36, 37, 28, 29, 30, 31, 6,
30, 31, 7, 8, 9, 2, 5, 4, 68, 0,
70, 11, 12, 69, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 28, 29, 30, 31, 67, 0,
0, 0, 0, 0, 0, 0, 0, 71, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 0, 0,
0, 57, 0, 0, 0, 0, 0, 0, 0, 27,
32, 33, 34, 35, 36, 37, 28, 29, 30, 31,
}
var yyPact = []int{
-1000, -1000, 42, -1000, -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27, |
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yychar { /* valid shift */
yychar = -1
yyVAL = yylval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
/* look through exception table */
xi := 0
for {
if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
yyn = yyExca[xi+0]
if yyn < 0 || yyn == yychar {
break
}
}
yyn = yyExca[xi+1]
if yyn < 0 {
goto ret0
}
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
yylex.Error("syntax error")
Nerrs++
if yyDebug >= 1 {
__yyfmt__.Printf("%s", yyStatname(yystate))
__yyfmt__.Printf("saw %s\n", yyTokname(yychar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
yyn = yyPact[yyS[yyp].yys] + yyErrCode
if yyn >= 0 && yyn < yyLast {
yystate = yyAct[yyn] /* simulate a shift of "error" */
if yyChk[yystate] == yyErrCode {
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
}
if yychar == yyEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
/* reduction by production yyn */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
}
yynt := yyn
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= yyR2[yyn]
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
yyn = yyR1[yyn]
yyg := yyPgo[yyn]
yyj := yyg + yyS[yyp].yys + 1
if yyj >= yyLast {
yystate = yyAct[yyg]
} else {
yystate = yyAct[yyj]
if yyChk[yystate] != -yyn {
yystate = yyAct[yyg]
}
}
// dummy call; replaced with literal code
switch yynt {
case 1:
//line numbers.y:35
{
prog = append(prog, yyS[yypt-0].stmtlist...)
}
case 2:
//line numbers.y:38
{
yyVAL.stmtlist = nil
}
case 3:
//line numbers.y:40
{
yyVAL.stmtlist = append(yyS[yypt-1].stmtlist, yyS[yypt-0].stmt)
}
case 4:
//line numbers.y:42
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 5:
//line numbers.y:44
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 6:
//line numbers.y:48
{
yyVAL.stmt = &Stmt{line: yyS[yypt-3].expr.line, left: yyS[yypt-3].expr, op: yyS[yypt-2].op, right: yyS[yypt-1].expr}
}
case 7:
//line numbers.y:50
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opUse, list: yyS[yypt-2].exprlist}
}
case 8:
//line numbers.y:52
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opPrint, list: yyS[yypt-2].exprlist}
}
case 9:
//line numbers.y:54
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opCheck, list: yyS[yypt-2].exprlist}
}
case 10:
//line numbers.y:58
{
yyVAL.op = opAssign
}
case 11:
//line numbers.y:60
{
yyVAL.op = opAssignWeak
}
case 12:
yyVAL.expr = yyS[yypt-0].expr
case 13:
//line numbers.y:65
{
yyVAL.expr = yyS[yypt-1].expr
}
case 14:
//line numbers.y:67
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opAdd, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 15:
//line numbers.y:69
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opSub, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 16:
//line numbers.y:71
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opMul, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 17:
//line numbers.y:73
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opDiv, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 18:
//line numbers.y:75
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLess, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 19:
//line numbers.y:77
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLessEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 20:
//line numbers.y:79
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreater, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 21:
//line numbers.y:81
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreaterEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 22:
//line numbers.y:83
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 23:
//line numbers.y:85
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opNotEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 24:
//line numbers.y:87
{
yyVAL.expr = &Expr{line: yyS[yypt-1].line, op: opMinus, left: yyS[yypt-0].expr}
}
case 25:
//line numbers.y:89
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMax, list: yyS[yypt-1].exprlist}
}
case 26:
//line numbers.y:91
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMin, list: yyS[yypt-1].exprlist}
}
case 27:
//line numbers.y:93
{
yyVAL.expr = &Expr{line: yyS[yypt-7].line, op: opCond, list: []*Expr{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}}
}
case 28:
//line numbers.y:97
{
yyVAL.exprlist = []*Expr{yyS[yypt-0].expr}
}
case 29:
//line numbers.y:99
{
yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
}
}
goto yystack /* stack new state and value */
} | }
var yyDef = []int{ | random_line_split |
y.go | //line numbers.y:2
package main
import __yyfmt__ "fmt"
//line numbers.y:2
//line numbers.y:5
type yySymType struct {
yys int
expr *Expr
exprlist []*Expr
stmt *Stmt
stmtlist []*Stmt
tok int
op Op
line Line
}
const _LE = 57346
const _GE = 57347
const _EQ = 57348
const _NE = 57349
const _UNARYMINUS = 57350
const _EQQ = 57351
const _EXPR = 57352
const _MAX = 57353
const _MIN = 57354
const _USE = 57355
const _PRINT = 57356
const _CHECK = 57357
const _COND = 57358
const _INCLUDE = 57359
var yyToknames = []string{
" <",
"_LE",
" >",
"_GE",
"_EQ",
"_NE",
" +",
" -",
" *",
" /",
"_UNARYMINUS",
" =",
"_EQQ",
"_EXPR",
"_MAX",
"_MIN",
"_USE",
"_PRINT",
"_CHECK",
"_COND",
"_INCLUDE",
}
var yyStatenames = []string{}
const yyEofCode = 1
const yyErrCode = 2
const yyMaxDepth = 200
//line numbers.y:101
//line yacctab:1
var yyExca = []int{
-1, 1,
1, -1,
-2, 0,
}
const yyNprod = 30
const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
const yyLast = 150
var yyAct = []int{
24, 19, 66, 44, 65, 44, 23, 17, 20, 21,
42, 16, 41, 22, 46, 44, 18, 40, 15, 38,
39, 25, 26, 45, 44, 43, 44, 14, 13, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 64,
63, 61, 1, 60, 10, 62, 3, 58, 59, 32,
33, 34, 35, 36, 37, 28, 29, 30, 31, 6,
30, 31, 7, 8, 9, 2, 5, 4, 68, 0,
70, 11, 12, 69, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 28, 29, 30, 31, 67, 0,
0, 0, 0, 0, 0, 0, 0, 71, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 0, 0,
0, 57, 0, 0, 0, 0, 0, 0, 0, 27,
32, 33, 34, 35, 36, 37, 28, 29, 30, 31,
}
var yyPact = []int{
-1000, -1000, 42, -1000, -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func | (lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yychar { /* valid shift */
yychar = -1
yyVAL = yylval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
/* look through exception table */
xi := 0
for {
if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
yyn = yyExca[xi+0]
if yyn < 0 || yyn == yychar {
break
}
}
yyn = yyExca[xi+1]
if yyn < 0 {
goto ret0
}
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
yylex.Error("syntax error")
Nerrs++
if yyDebug >= 1 {
__yyfmt__.Printf("%s", yyStatname(yystate))
__yyfmt__.Printf("saw %s\n", yyTokname(yychar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
yyn = yyPact[yyS[yyp].yys] + yyErrCode
if yyn >= 0 && yyn < yyLast {
yystate = yyAct[yyn] /* simulate a shift of "error" */
if yyChk[yystate] == yyErrCode {
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
}
if yychar == yyEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
/* reduction by production yyn */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
}
yynt := yyn
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= yyR2[yyn]
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
yyn = yyR1[yyn]
yyg := yyPgo[yyn]
yyj := yyg + yyS[yyp].yys + 1
if yyj >= yyLast {
yystate = yyAct[yyg]
} else {
yystate = yyAct[yyj]
if yyChk[yystate] != -yyn {
yystate = yyAct[yyg]
}
}
// dummy call; replaced with literal code
switch yynt {
case 1:
//line numbers.y:35
{
prog = append(prog, yyS[yypt-0].stmtlist...)
}
case 2:
//line numbers.y:38
{
yyVAL.stmtlist = nil
}
case 3:
//line numbers.y:40
{
yyVAL.stmtlist = append(yyS[yypt-1].stmtlist, yyS[yypt-0].stmt)
}
case 4:
//line numbers.y:42
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 5:
//line numbers.y:44
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 6:
//line numbers.y:48
{
yyVAL.stmt = &Stmt{line: yyS[yypt-3].expr.line, left: yyS[yypt-3].expr, op: yyS[yypt-2].op, right: yyS[yypt-1].expr}
}
case 7:
//line numbers.y:50
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opUse, list: yyS[yypt-2].exprlist}
}
case 8:
//line numbers.y:52
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opPrint, list: yyS[yypt-2].exprlist}
}
case 9:
//line numbers.y:54
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opCheck, list: yyS[yypt-2].exprlist}
}
case 10:
//line numbers.y:58
{
yyVAL.op = opAssign
}
case 11:
//line numbers.y:60
{
yyVAL.op = opAssignWeak
}
case 12:
yyVAL.expr = yyS[yypt-0].expr
case 13:
//line numbers.y:65
{
yyVAL.expr = yyS[yypt-1].expr
}
case 14:
//line numbers.y:67
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opAdd, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 15:
//line numbers.y:69
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opSub, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 16:
//line numbers.y:71
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opMul, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 17:
//line numbers.y:73
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opDiv, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 18:
//line numbers.y:75
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLess, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 19:
//line numbers.y:77
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLessEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 20:
//line numbers.y:79
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreater, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 21:
//line numbers.y:81
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreaterEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 22:
//line numbers.y:83
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 23:
//line numbers.y:85
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opNotEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 24:
//line numbers.y:87
{
yyVAL.expr = &Expr{line: yyS[yypt-1].line, op: opMinus, left: yyS[yypt-0].expr}
}
case 25:
//line numbers.y:89
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMax, list: yyS[yypt-1].exprlist}
}
case 26:
//line numbers.y:91
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMin, list: yyS[yypt-1].exprlist}
}
case 27:
//line numbers.y:93
{
yyVAL.expr = &Expr{line: yyS[yypt-7].line, op: opCond, list: []*Expr{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}}
}
case 28:
//line numbers.y:97
{
yyVAL.exprlist = []*Expr{yyS[yypt-0].expr}
}
case 29:
//line numbers.y:99
{
yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
}
}
goto yystack /* stack new state and value */
}
| yylex1 | identifier_name |
y.go | //line numbers.y:2
package main
import __yyfmt__ "fmt"
//line numbers.y:2
//line numbers.y:5
type yySymType struct {
yys int
expr *Expr
exprlist []*Expr
stmt *Stmt
stmtlist []*Stmt
tok int
op Op
line Line
}
const _LE = 57346
const _GE = 57347
const _EQ = 57348
const _NE = 57349
const _UNARYMINUS = 57350
const _EQQ = 57351
const _EXPR = 57352
const _MAX = 57353
const _MIN = 57354
const _USE = 57355
const _PRINT = 57356
const _CHECK = 57357
const _COND = 57358
const _INCLUDE = 57359
var yyToknames = []string{
" <",
"_LE",
" >",
"_GE",
"_EQ",
"_NE",
" +",
" -",
" *",
" /",
"_UNARYMINUS",
" =",
"_EQQ",
"_EXPR",
"_MAX",
"_MIN",
"_USE",
"_PRINT",
"_CHECK",
"_COND",
"_INCLUDE",
}
var yyStatenames = []string{}
const yyEofCode = 1
const yyErrCode = 2
const yyMaxDepth = 200
//line numbers.y:101
//line yacctab:1
var yyExca = []int{
-1, 1,
1, -1,
-2, 0,
}
const yyNprod = 30
const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
const yyLast = 150
var yyAct = []int{
24, 19, 66, 44, 65, 44, 23, 17, 20, 21,
42, 16, 41, 22, 46, 44, 18, 40, 15, 38,
39, 25, 26, 45, 44, 43, 44, 14, 13, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 64,
63, 61, 1, 60, 10, 62, 3, 58, 59, 32,
33, 34, 35, 36, 37, 28, 29, 30, 31, 6,
30, 31, 7, 8, 9, 2, 5, 4, 68, 0,
70, 11, 12, 69, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
28, 29, 30, 31, 28, 29, 30, 31, 67, 0,
0, 0, 0, 0, 0, 0, 0, 71, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 28, 29, 30, 31, 0, 0,
0, 57, 0, 0, 0, 0, 0, 0, 0, 27,
32, 33, 34, 35, 36, 37, 28, 29, 30, 31,
}
var yyPact = []int{
-1000, -1000, 42, -1000, -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yychar { /* valid shift */
yychar = -1
yyVAL = yylval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yychar < 0 |
/* look through exception table */
xi := 0
for {
if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
yyn = yyExca[xi+0]
if yyn < 0 || yyn == yychar {
break
}
}
yyn = yyExca[xi+1]
if yyn < 0 {
goto ret0
}
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
yylex.Error("syntax error")
Nerrs++
if yyDebug >= 1 {
__yyfmt__.Printf("%s", yyStatname(yystate))
__yyfmt__.Printf("saw %s\n", yyTokname(yychar))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
yyn = yyPact[yyS[yyp].yys] + yyErrCode
if yyn >= 0 && yyn < yyLast {
yystate = yyAct[yyn] /* simulate a shift of "error" */
if yyChk[yystate] == yyErrCode {
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yychar))
}
if yychar == yyEofCode {
goto ret1
}
yychar = -1
goto yynewstate /* try again in the same state */
}
}
/* reduction by production yyn */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
}
yynt := yyn
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= yyR2[yyn]
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
yyn = yyR1[yyn]
yyg := yyPgo[yyn]
yyj := yyg + yyS[yyp].yys + 1
if yyj >= yyLast {
yystate = yyAct[yyg]
} else {
yystate = yyAct[yyj]
if yyChk[yystate] != -yyn {
yystate = yyAct[yyg]
}
}
// dummy call; replaced with literal code
switch yynt {
case 1:
//line numbers.y:35
{
prog = append(prog, yyS[yypt-0].stmtlist...)
}
case 2:
//line numbers.y:38
{
yyVAL.stmtlist = nil
}
case 3:
//line numbers.y:40
{
yyVAL.stmtlist = append(yyS[yypt-1].stmtlist, yyS[yypt-0].stmt)
}
case 4:
//line numbers.y:42
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 5:
//line numbers.y:44
{
yyVAL.stmtlist = yyS[yypt-1].stmtlist
}
case 6:
//line numbers.y:48
{
yyVAL.stmt = &Stmt{line: yyS[yypt-3].expr.line, left: yyS[yypt-3].expr, op: yyS[yypt-2].op, right: yyS[yypt-1].expr}
}
case 7:
//line numbers.y:50
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opUse, list: yyS[yypt-2].exprlist}
}
case 8:
//line numbers.y:52
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opPrint, list: yyS[yypt-2].exprlist}
}
case 9:
//line numbers.y:54
{
yyVAL.stmt = &Stmt{line: yyS[yypt-4].line, op: opCheck, list: yyS[yypt-2].exprlist}
}
case 10:
//line numbers.y:58
{
yyVAL.op = opAssign
}
case 11:
//line numbers.y:60
{
yyVAL.op = opAssignWeak
}
case 12:
yyVAL.expr = yyS[yypt-0].expr
case 13:
//line numbers.y:65
{
yyVAL.expr = yyS[yypt-1].expr
}
case 14:
//line numbers.y:67
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opAdd, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 15:
//line numbers.y:69
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opSub, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 16:
//line numbers.y:71
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opMul, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 17:
//line numbers.y:73
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opDiv, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 18:
//line numbers.y:75
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLess, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 19:
//line numbers.y:77
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opLessEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 20:
//line numbers.y:79
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreater, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 21:
//line numbers.y:81
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opGreaterEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 22:
//line numbers.y:83
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 23:
//line numbers.y:85
{
yyVAL.expr = &Expr{line: yyS[yypt-2].expr.line, op: opNotEqual, left: yyS[yypt-2].expr, right: yyS[yypt-0].expr}
}
case 24:
//line numbers.y:87
{
yyVAL.expr = &Expr{line: yyS[yypt-1].line, op: opMinus, left: yyS[yypt-0].expr}
}
case 25:
//line numbers.y:89
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMax, list: yyS[yypt-1].exprlist}
}
case 26:
//line numbers.y:91
{
yyVAL.expr = &Expr{line: yyS[yypt-3].line, op: opMin, list: yyS[yypt-1].exprlist}
}
case 27:
//line numbers.y:93
{
yyVAL.expr = &Expr{line: yyS[yypt-7].line, op: opCond, list: []*Expr{yyS[yypt-5].expr, yyS[yypt-3].expr, yyS[yypt-1].expr}}
}
case 28:
//line numbers.y:97
{
yyVAL.exprlist = []*Expr{yyS[yypt-0].expr}
}
case 29:
//line numbers.y:99
{
yyVAL.exprlist = append(yyS[yypt-2].exprlist, yyS[yypt-0].expr)
}
}
goto yystack /* stack new state and value */
}
| {
yychar = yylex1(yylex, &yylval)
} | conditional_block |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn overrides<T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status { | },
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pendings tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
} | TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
} | random_line_split |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn overrides<T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() |
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
}
},
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pendings tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
}
| {
self.broker.decrease_prefetch_count().await?;
} | conditional_block |
mod.rs | use futures::{select, StreamExt};
use log::{debug, error, info, warn};
use std::collections::HashMap;
use std::sync::RwLock;
use tokio::signal::unix::{signal, SignalKind};
use tokio::sync::mpsc::{self, UnboundedSender};
mod trace;
use crate::protocol::{Message, MessageBody, TryIntoMessage};
use crate::{Broker, Error, ErrorKind, Task};
use trace::{build_tracer, TraceBuilder, TracerTrait};
#[derive(Copy, Clone, Default)]
struct TaskOptions {
timeout: Option<usize>,
max_retries: Option<usize>,
min_retry_delay: usize,
max_retry_delay: usize,
}
impl TaskOptions {
fn | <T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
}
},
};
}
if pending_tasks > 0 {
// Warm shutdown loop. When there are still pendings tasks we wait for them
// to finish. We get updates about pending tasks through the `event_rx` channel.
// We also watch for a second SIGINT, in which case we immediately shutdown.
info!("Waiting on {} pending tasks...", pending_tasks);
loop {
select! {
_ = signals.next() => {
warn!("Okay fine, shutting down now. See ya!");
return Err(ErrorKind::ForcedShutdown.into());
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status {
TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
if pending_tasks <= 0 {
break;
}
}
},
};
}
}
info!("No more pending tasks. See ya!");
Ok(())
}
}
| overrides | identifier_name |
ddpg.py | #!/usr/bin/env python
import os
import tensorflow as tf
import numpy as np
from collections import OrderedDict
from model_states_client import set_states
import random
import rospy
from bot_utils.utils import Csv, Plot
RANDOM_SEED = 1234
BUFFER_SIZE = 100
MINIBATCH_SIZE = 25
EPISODES = 1000
STEPS = 10
ACTOR_LEARNING_RATE = 0.001
CRITIC_LEARNING_RATE = 0.01
EXPLORE = 10000.0
GAMMA_FACTOR = 0.95
PARAM_FILE = '../param/training_parameters_2018_06_20.csv'
SUM_FILE = '../SUM_DIR/summary_2018_06_20.csv'
TAU = 0.01
cube_map_pose = OrderedDict([('cube_blue_1', [1.5, -0.3, 0.80]), ('cube_blue_2', [1.5, -0.05, 0.80]),
('cube_red_1', [1.5, 0.2, 0.80]), ('cube_red_2', [1.7, -0.15, 0.80]),
('cube_green_1', [1.65, 0.05, 0.80]), ('cube_green_2', [1.6, 0.32, 0.80])
])
#
# cube_map_pose = OrderedDict([('cube_blue_1', [1.4, -0.3, 0.80]), ('cube_blue_2', [1.4, -0.3, 1.5]),
# ('cube_red_1', [1.4, 0.2, 0.83]), ('cube_red_2', [1.6, 0.2, 0.83]),
# ('cube_green_1', [1.5, 0.0, 0.83]), ('cube_green_2', [1.5, 0.32, 0.83])
# ])
reoder_pose = OrderedDict([('cube_blue_1', [1.8, 0.0, 0.80]), ('cube_blue_2', [1.8, -0.15, 0.80]),
('cube_red_1', [1.8, 0.15, 0.80]), ('cube_red_2', [1.8, 0.3, 0.80]),
('cube_green_1', [1.8, -0.3, 0.80]), ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class | (object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient})
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self.critic_action, self.critic_weights, self.critic_out = self.create_critic_network(
'critic_network')
self.target_critic_inputs_dirt, self.target_critic_action, self.target_critic_weights, self.target_critic_out = self.create_critic_network(
'critic_target')
self.update_target_network_params = [
self.target_critic_weights[i].assign(tf.multiply(self.critic_weights[i], self.tau) +
tf.multiply(self.target_critic_weights[i], 1. - self.tau))
for i in range(len(self.target_critic_weights))]
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
self.loss = tf.reduce_mean(tf.square(self.predicted_q_value - self.critic_out))
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.action_grads = tf.gradients(self.critic_out, self.critic_action)
def create_critic_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
A = tf.placeholder(dtype = tf.float32, shape = [None, 2, 3])
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense_state'):
H3 = tf.reshape(H2, [-1, 5 * 5 * 64])
H4 = tf.layers.dense(H3, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_action'):
H5 = tf.layers.dense(A, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_state_action'):
H6 = tf.reduce_sum(H5, axis = 1)
H7 = tf.layers.dense(tf.concat([H4, H6], axis = 1), 300, activation = tf.nn.relu)
with tf.variable_scope('q_out'):
O = tf.layers.dense(H7, 1)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, A, W, O
def train(self, inputs, action, predicted_q_value):
return self.sess.run([self.critic_out, self.optimize, self.loss], feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action,
self.predicted_q_value: predicted_q_value
})
def predict(self, inputs, action):
return self.sess.run(self.critic_out, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_critic_out, feed_dict = {
self.target_critic_inputs_dirt: inputs,
self.target_critic_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class Memory(object):
def __init__(self, buffer_size, random_seed = 1234):
self.buffer_size = buffer_size
self.count = 0
np.random.seed(random_seed)
def add(self, s, a, r, t, s2):
experience = [[s, a, r, t, s2]]
try:
if self.buffer.shape[0] >= self.buffer_size:
self.buffer = np.delete(self.buffer, 0, axis = 0)
self.concat(experience)
else:
self.concat(experience)
except:
self.concat(experience)
self.count = self.buffer.shape[0]
def size(self):
return self.count
def concat(self, experience):
try:
self.buffer = np.concatenate((self.buffer, experience), axis = 0)
except:
self.buffer = np.array(experience)
def sample_batch(self, batch_size):
idx = range(batch_size)
np.random.shuffle(idx)
batch = self.buffer[idx]
s_batch = [elem.tolist() for elem in batch[:, 0]]
a_batch = [elem.tolist() for elem in batch[:, 1]]
r_batch = batch[:, 2]
t_batch = batch[:, 3]
s2_batch = [elem.tolist() for elem in batch[:, 4]]
return s_batch, a_batch, r_batch, t_batch, s2_batch
class Summary(object):
def __init__(self):
summary = Csv('../SUM_DIR/summary.csv')
data = summary.read()
episodes = summary.convert(data['episodes'], int)
total_action_attempt = summary.convert(data['total_action_attempt'], int)
total_grasp_attempt = summary.convert(data['total_grasp_attempt'], int)
total_grasp_success = summary.convert(data['total_grasp_success'], int)
total_place_attempt = summary.convert(data['total_place_attempt'], int)
total_place_success = summary.convert(data['total_place_success'], int)
rewards = summary.convert(data['rewards'], float)
index = 15
Plot.plot(episodes[::index], total_place_success[::index], color = 'green',
axis_max = {'x': 1650, 'y': 4}, axis_min = {'x': 0, 'y': 0},
show = False, save_path = '../total_place_success.png',
labels = {'x': 'Episodes', 'y': 'total_place_success'})
if __name__ == '__main__':
with tf.Session() as sess:
state_dim = [84, 84, 3]
action_dim = [2, 3]
actor = ActorNetwork(sess, state_dim, action_dim, ACTOR_LEARNING_RATE, TAU)
critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU)
DDPG().execute(sess, actor, critic, train = True)
# Summary()
| ActorNetwork | identifier_name |
ddpg.py | #!/usr/bin/env python
import os
import tensorflow as tf
import numpy as np
from collections import OrderedDict
from model_states_client import set_states
import random
import rospy
from bot_utils.utils import Csv, Plot
RANDOM_SEED = 1234
BUFFER_SIZE = 100
MINIBATCH_SIZE = 25
EPISODES = 1000
STEPS = 10
ACTOR_LEARNING_RATE = 0.001
CRITIC_LEARNING_RATE = 0.01
EXPLORE = 10000.0
GAMMA_FACTOR = 0.95
PARAM_FILE = '../param/training_parameters_2018_06_20.csv'
SUM_FILE = '../SUM_DIR/summary_2018_06_20.csv'
TAU = 0.01
cube_map_pose = OrderedDict([('cube_blue_1', [1.5, -0.3, 0.80]), ('cube_blue_2', [1.5, -0.05, 0.80]),
('cube_red_1', [1.5, 0.2, 0.80]), ('cube_red_2', [1.7, -0.15, 0.80]),
('cube_green_1', [1.65, 0.05, 0.80]), ('cube_green_2', [1.6, 0.32, 0.80])
])
#
# cube_map_pose = OrderedDict([('cube_blue_1', [1.4, -0.3, 0.80]), ('cube_blue_2', [1.4, -0.3, 1.5]),
# ('cube_red_1', [1.4, 0.2, 0.83]), ('cube_red_2', [1.6, 0.2, 0.83]),
# ('cube_green_1', [1.5, 0.0, 0.83]), ('cube_green_2', [1.5, 0.32, 0.83])
# ])
reoder_pose = OrderedDict([('cube_blue_1', [1.8, 0.0, 0.80]), ('cube_blue_2', [1.8, -0.15, 0.80]),
('cube_red_1', [1.8, 0.15, 0.80]), ('cube_red_2', [1.8, 0.3, 0.80]),
('cube_green_1', [1.8, -0.3, 0.80]), ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
|
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self.critic_action, self.critic_weights, self.critic_out = self.create_critic_network(
'critic_network')
self.target_critic_inputs_dirt, self.target_critic_action, self.target_critic_weights, self.target_critic_out = self.create_critic_network(
'critic_target')
self.update_target_network_params = [
self.target_critic_weights[i].assign(tf.multiply(self.critic_weights[i], self.tau) +
tf.multiply(self.target_critic_weights[i], 1. - self.tau))
for i in range(len(self.target_critic_weights))]
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
self.loss = tf.reduce_mean(tf.square(self.predicted_q_value - self.critic_out))
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.action_grads = tf.gradients(self.critic_out, self.critic_action)
def create_critic_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
A = tf.placeholder(dtype = tf.float32, shape = [None, 2, 3])
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense_state'):
H3 = tf.reshape(H2, [-1, 5 * 5 * 64])
H4 = tf.layers.dense(H3, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_action'):
H5 = tf.layers.dense(A, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_state_action'):
H6 = tf.reduce_sum(H5, axis = 1)
H7 = tf.layers.dense(tf.concat([H4, H6], axis = 1), 300, activation = tf.nn.relu)
with tf.variable_scope('q_out'):
O = tf.layers.dense(H7, 1)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, A, W, O
def train(self, inputs, action, predicted_q_value):
return self.sess.run([self.critic_out, self.optimize, self.loss], feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action,
self.predicted_q_value: predicted_q_value
})
def predict(self, inputs, action):
return self.sess.run(self.critic_out, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_critic_out, feed_dict = {
self.target_critic_inputs_dirt: inputs,
self.target_critic_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class Memory(object):
def __init__(self, buffer_size, random_seed = 1234):
self.buffer_size = buffer_size
self.count = 0
np.random.seed(random_seed)
def add(self, s, a, r, t, s2):
experience = [[s, a, r, t, s2]]
try:
if self.buffer.shape[0] >= self.buffer_size:
self.buffer = np.delete(self.buffer, 0, axis = 0)
self.concat(experience)
else:
self.concat(experience)
except:
self.concat(experience)
self.count = self.buffer.shape[0]
def size(self):
return self.count
def concat(self, experience):
try:
self.buffer = np.concatenate((self.buffer, experience), axis = 0)
except:
self.buffer = np.array(experience)
def sample_batch(self, batch_size):
idx = range(batch_size)
np.random.shuffle(idx)
batch = self.buffer[idx]
s_batch = [elem.tolist() for elem in batch[:, 0]]
a_batch = [elem.tolist() for elem in batch[:, 1]]
r_batch = batch[:, 2]
t_batch = batch[:, 3]
s2_batch = [elem.tolist() for elem in batch[:, 4]]
return s_batch, a_batch, r_batch, t_batch, s2_batch
class Summary(object):
def __init__(self):
summary = Csv('../SUM_DIR/summary.csv')
data = summary.read()
episodes = summary.convert(data['episodes'], int)
total_action_attempt = summary.convert(data['total_action_attempt'], int)
total_grasp_attempt = summary.convert(data['total_grasp_attempt'], int)
total_grasp_success = summary.convert(data['total_grasp_success'], int)
total_place_attempt = summary.convert(data['total_place_attempt'], int)
total_place_success = summary.convert(data['total_place_success'], int)
rewards = summary.convert(data['rewards'], float)
index = 15
Plot.plot(episodes[::index], total_place_success[::index], color = 'green',
axis_max = {'x': 1650, 'y': 4}, axis_min = {'x': 0, 'y': 0},
show = False, save_path = '../total_place_success.png',
labels = {'x': 'Episodes', 'y': 'total_place_success'})
if __name__ == '__main__':
with tf.Session() as sess:
state_dim = [84, 84, 3]
action_dim = [2, 3]
actor = ActorNetwork(sess, state_dim, action_dim, ACTOR_LEARNING_RATE, TAU)
critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU)
DDPG().execute(sess, actor, critic, train = True)
# Summary()
| self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient}) | identifier_body |
ddpg.py | #!/usr/bin/env python
import os
import tensorflow as tf
import numpy as np
from collections import OrderedDict
from model_states_client import set_states
import random
import rospy
from bot_utils.utils import Csv, Plot
RANDOM_SEED = 1234
BUFFER_SIZE = 100
MINIBATCH_SIZE = 25
EPISODES = 1000
STEPS = 10
ACTOR_LEARNING_RATE = 0.001
CRITIC_LEARNING_RATE = 0.01
EXPLORE = 10000.0
GAMMA_FACTOR = 0.95
PARAM_FILE = '../param/training_parameters_2018_06_20.csv'
SUM_FILE = '../SUM_DIR/summary_2018_06_20.csv'
TAU = 0.01
cube_map_pose = OrderedDict([('cube_blue_1', [1.5, -0.3, 0.80]), ('cube_blue_2', [1.5, -0.05, 0.80]),
('cube_red_1', [1.5, 0.2, 0.80]), ('cube_red_2', [1.7, -0.15, 0.80]),
('cube_green_1', [1.65, 0.05, 0.80]), ('cube_green_2', [1.6, 0.32, 0.80])
])
#
# cube_map_pose = OrderedDict([('cube_blue_1', [1.4, -0.3, 0.80]), ('cube_blue_2', [1.4, -0.3, 1.5]),
# ('cube_red_1', [1.4, 0.2, 0.83]), ('cube_red_2', [1.6, 0.2, 0.83]),
# ('cube_green_1', [1.5, 0.0, 0.83]), ('cube_green_2', [1.5, 0.32, 0.83])
# ])
reoder_pose = OrderedDict([('cube_blue_1', [1.8, 0.0, 0.80]), ('cube_blue_2', [1.8, -0.15, 0.80]),
('cube_red_1', [1.8, 0.15, 0.80]), ('cube_red_2', [1.8, 0.3, 0.80]),
('cube_green_1', [1.8, -0.3, 0.80]), ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
|
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient})
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self.critic_action, self.critic_weights, self.critic_out = self.create_critic_network(
'critic_network')
self.target_critic_inputs_dirt, self.target_critic_action, self.target_critic_weights, self.target_critic_out = self.create_critic_network(
'critic_target')
self.update_target_network_params = [
self.target_critic_weights[i].assign(tf.multiply(self.critic_weights[i], self.tau) +
tf.multiply(self.target_critic_weights[i], 1. - self.tau))
for i in range(len(self.target_critic_weights))]
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
self.loss = tf.reduce_mean(tf.square(self.predicted_q_value - self.critic_out))
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.action_grads = tf.gradients(self.critic_out, self.critic_action)
def create_critic_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
A = tf.placeholder(dtype = tf.float32, shape = [None, 2, 3])
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense_state'):
H3 = tf.reshape(H2, [-1, 5 * 5 * 64])
H4 = tf.layers.dense(H3, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_action'):
H5 = tf.layers.dense(A, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_state_action'):
H6 = tf.reduce_sum(H5, axis = 1)
H7 = tf.layers.dense(tf.concat([H4, H6], axis = 1), 300, activation = tf.nn.relu)
with tf.variable_scope('q_out'):
O = tf.layers.dense(H7, 1)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, A, W, O
def train(self, inputs, action, predicted_q_value):
return self.sess.run([self.critic_out, self.optimize, self.loss], feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action,
self.predicted_q_value: predicted_q_value
})
def predict(self, inputs, action):
return self.sess.run(self.critic_out, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_critic_out, feed_dict = {
self.target_critic_inputs_dirt: inputs,
self.target_critic_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class Memory(object):
def __init__(self, buffer_size, random_seed = 1234):
self.buffer_size = buffer_size
self.count = 0
np.random.seed(random_seed)
def add(self, s, a, r, t, s2):
experience = [[s, a, r, t, s2]]
try:
if self.buffer.shape[0] >= self.buffer_size:
self.buffer = np.delete(self.buffer, 0, axis = 0)
self.concat(experience)
else:
self.concat(experience)
except:
self.concat(experience)
self.count = self.buffer.shape[0]
def size(self):
return self.count
def concat(self, experience):
try:
self.buffer = np.concatenate((self.buffer, experience), axis = 0)
except:
self.buffer = np.array(experience)
def sample_batch(self, batch_size):
idx = range(batch_size)
np.random.shuffle(idx)
batch = self.buffer[idx]
s_batch = [elem.tolist() for elem in batch[:, 0]]
a_batch = [elem.tolist() for elem in batch[:, 1]]
r_batch = batch[:, 2]
t_batch = batch[:, 3]
s2_batch = [elem.tolist() for elem in batch[:, 4]]
return s_batch, a_batch, r_batch, t_batch, s2_batch
class Summary(object):
def __init__(self):
summary = Csv('../SUM_DIR/summary.csv')
data = summary.read()
episodes = summary.convert(data['episodes'], int)
total_action_attempt = summary.convert(data['total_action_attempt'], int)
total_grasp_attempt = summary.convert(data['total_grasp_attempt'], int)
total_grasp_success = summary.convert(data['total_grasp_success'], int)
total_place_attempt = summary.convert(data['total_place_attempt'], int)
total_place_success = summary.convert(data['total_place_success'], int)
rewards = summary.convert(data['rewards'], float)
index = 15
Plot.plot(episodes[::index], total_place_success[::index], color = 'green',
axis_max = {'x': 1650, 'y': 4}, axis_min = {'x': 0, 'y': 0},
show = False, save_path = '../total_place_success.png',
labels = {'x': 'Episodes', 'y': 'total_place_success'})
if __name__ == '__main__':
with tf.Session() as sess:
state_dim = [84, 84, 3]
action_dim = [2, 3]
actor = ActorNetwork(sess, state_dim, action_dim, ACTOR_LEARNING_RATE, TAU)
critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU)
DDPG().execute(sess, actor, critic, train = True)
# Summary()
| delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass | conditional_block |
ddpg.py | #!/usr/bin/env python
import os
import tensorflow as tf
import numpy as np
from collections import OrderedDict
from model_states_client import set_states
import random
import rospy
from bot_utils.utils import Csv, Plot
RANDOM_SEED = 1234
BUFFER_SIZE = 100
MINIBATCH_SIZE = 25
EPISODES = 1000
STEPS = 10
ACTOR_LEARNING_RATE = 0.001
CRITIC_LEARNING_RATE = 0.01
EXPLORE = 10000.0
GAMMA_FACTOR = 0.95 |
TAU = 0.01
cube_map_pose = OrderedDict([('cube_blue_1', [1.5, -0.3, 0.80]), ('cube_blue_2', [1.5, -0.05, 0.80]),
('cube_red_1', [1.5, 0.2, 0.80]), ('cube_red_2', [1.7, -0.15, 0.80]),
('cube_green_1', [1.65, 0.05, 0.80]), ('cube_green_2', [1.6, 0.32, 0.80])
])
#
# cube_map_pose = OrderedDict([('cube_blue_1', [1.4, -0.3, 0.80]), ('cube_blue_2', [1.4, -0.3, 1.5]),
# ('cube_red_1', [1.4, 0.2, 0.83]), ('cube_red_2', [1.6, 0.2, 0.83]),
# ('cube_green_1', [1.5, 0.0, 0.83]), ('cube_green_2', [1.5, 0.32, 0.83])
# ])
reoder_pose = OrderedDict([('cube_blue_1', [1.8, 0.0, 0.80]), ('cube_blue_2', [1.8, -0.15, 0.80]),
('cube_red_1', [1.8, 0.15, 0.80]), ('cube_red_2', [1.8, 0.3, 0.80]),
('cube_green_1', [1.8, -0.3, 0.80]), ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient})
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self.critic_action, self.critic_weights, self.critic_out = self.create_critic_network(
'critic_network')
self.target_critic_inputs_dirt, self.target_critic_action, self.target_critic_weights, self.target_critic_out = self.create_critic_network(
'critic_target')
self.update_target_network_params = [
self.target_critic_weights[i].assign(tf.multiply(self.critic_weights[i], self.tau) +
tf.multiply(self.target_critic_weights[i], 1. - self.tau))
for i in range(len(self.target_critic_weights))]
self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
self.loss = tf.reduce_mean(tf.square(self.predicted_q_value - self.critic_out))
self.optimize = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
self.action_grads = tf.gradients(self.critic_out, self.critic_action)
def create_critic_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
A = tf.placeholder(dtype = tf.float32, shape = [None, 2, 3])
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense_state'):
H3 = tf.reshape(H2, [-1, 5 * 5 * 64])
H4 = tf.layers.dense(H3, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_action'):
H5 = tf.layers.dense(A, 300, activation = tf.nn.relu)
with tf.variable_scope('dense_state_action'):
H6 = tf.reduce_sum(H5, axis = 1)
H7 = tf.layers.dense(tf.concat([H4, H6], axis = 1), 300, activation = tf.nn.relu)
with tf.variable_scope('q_out'):
O = tf.layers.dense(H7, 1)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, A, W, O
def train(self, inputs, action, predicted_q_value):
return self.sess.run([self.critic_out, self.optimize, self.loss], feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action,
self.predicted_q_value: predicted_q_value
})
def predict(self, inputs, action):
return self.sess.run(self.critic_out, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: action
})
def predict_target(self, inputs, action):
return self.sess.run(self.target_critic_out, feed_dict = {
self.target_critic_inputs_dirt: inputs,
self.target_critic_action: action
})
def action_gradients(self, inputs, actions):
return self.sess.run(self.action_grads, feed_dict = {
self.critic_inputs_dirt: inputs,
self.critic_action: actions
})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class Memory(object):
def __init__(self, buffer_size, random_seed = 1234):
self.buffer_size = buffer_size
self.count = 0
np.random.seed(random_seed)
def add(self, s, a, r, t, s2):
experience = [[s, a, r, t, s2]]
try:
if self.buffer.shape[0] >= self.buffer_size:
self.buffer = np.delete(self.buffer, 0, axis = 0)
self.concat(experience)
else:
self.concat(experience)
except:
self.concat(experience)
self.count = self.buffer.shape[0]
def size(self):
return self.count
def concat(self, experience):
try:
self.buffer = np.concatenate((self.buffer, experience), axis = 0)
except:
self.buffer = np.array(experience)
def sample_batch(self, batch_size):
idx = range(batch_size)
np.random.shuffle(idx)
batch = self.buffer[idx]
s_batch = [elem.tolist() for elem in batch[:, 0]]
a_batch = [elem.tolist() for elem in batch[:, 1]]
r_batch = batch[:, 2]
t_batch = batch[:, 3]
s2_batch = [elem.tolist() for elem in batch[:, 4]]
return s_batch, a_batch, r_batch, t_batch, s2_batch
class Summary(object):
def __init__(self):
summary = Csv('../SUM_DIR/summary.csv')
data = summary.read()
episodes = summary.convert(data['episodes'], int)
total_action_attempt = summary.convert(data['total_action_attempt'], int)
total_grasp_attempt = summary.convert(data['total_grasp_attempt'], int)
total_grasp_success = summary.convert(data['total_grasp_success'], int)
total_place_attempt = summary.convert(data['total_place_attempt'], int)
total_place_success = summary.convert(data['total_place_success'], int)
rewards = summary.convert(data['rewards'], float)
index = 15
Plot.plot(episodes[::index], total_place_success[::index], color = 'green',
axis_max = {'x': 1650, 'y': 4}, axis_min = {'x': 0, 'y': 0},
show = False, save_path = '../total_place_success.png',
labels = {'x': 'Episodes', 'y': 'total_place_success'})
if __name__ == '__main__':
with tf.Session() as sess:
state_dim = [84, 84, 3]
action_dim = [2, 3]
actor = ActorNetwork(sess, state_dim, action_dim, ACTOR_LEARNING_RATE, TAU)
critic = CriticNetwork(sess, state_dim, action_dim, CRITIC_LEARNING_RATE, TAU)
DDPG().execute(sess, actor, critic, train = True)
# Summary() |
PARAM_FILE = '../param/training_parameters_2018_06_20.csv'
SUM_FILE = '../SUM_DIR/summary_2018_06_20.csv' | random_line_split |
core.ts | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {exec} from 'child_process';
import {promisify} from 'util';
const execAsync = promisify(exec);
import {load} from 'js-yaml';
import {logger as defaultLogger, GCFLogger} from 'gcf-utils';
import {sign} from 'jsonwebtoken';
import {request} from 'gaxios';
import {CloudBuildClient} from '@google-cloud/cloudbuild';
import {Octokit} from '@octokit/rest';
// eslint-disable-next-line node/no-extraneous-import
import {RequestError} from '@octokit/types';
// eslint-disable-next-line node/no-extraneous-import
import {OwlBotLock, OWL_BOT_LOCK_PATH, owlBotLockFrom} from './config-files';
import {OctokitFactory} from './octokit-util';
import {OWL_BOT_IGNORE} from './labels';
import {OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER} from './constants';
import {CopyCodeIntoPullRequestAction} from './copy-code';
import {google} from '@google-cloud/cloudbuild/build/protos/protos';
interface BuildArgs {
image: string;
privateKey: string;
appId: number;
installation: number;
repo: string;
pr: number;
project?: string;
trigger: string;
defaultBranch?: string;
}
export interface CheckArgs {
privateKey: string;
appId: number;
installation: number;
pr: number;
repo: string;
summary: string;
conclusion: 'success' | 'failure';
detailsURL: string;
text: string;
title: string;
}
interface AuthArgs {
privateKey: string;
appId: number;
installation: number;
}
interface BuildSummary {
conclusion: 'success' | 'failure';
summary: string;
text: string;
}
interface BuildResponse extends BuildSummary {
detailsURL: string;
}
interface Commit {
sha: string;
}
interface Token {
token: string;
expires_at: string;
permissions: object;
repository_selection: string;
}
export const OWL_BOT_LOCK_UPDATE = 'owl-bot-update-lock';
export const OWL_BOT_COPY = 'owl-bot-copy';
// Check back on the build every 1/3 of a minute (20000ms)
const PING_DELAY = 20000;
// 60 min * 3 hours * 3 * 1/3s of a minute (3 hours)
const TOTAL_PINGS = 3 * 60 * 3;
export async function triggerPostProcessBuild(
args: BuildArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
): Promise<BuildResponse | null> {
const token = await core.getGitHubShortLivedAccessToken(
args.privateKey,
args.appId,
args.installation
);
const project = args.project || process.env.PROJECT_ID;
if (!project) {
throw Error('gcloud project must be provided');
}
const [owner, repo] = args.repo.split('/');
if (!octokit) {
octokit = await core.getAuthenticatedOctokit(token.token);
}
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: args.pr,
});
// See if someone asked owl bot to ignore this PR.
if (prData.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`Ignoring ${owner}/${repo} #${args.pr} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return null;
}
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${args.pr}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const cb = core.getCloudBuildInstance();
const [resp] = await cb.runBuildTrigger({
projectId: project,
triggerId: args.trigger,
source: {
projectId: project,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token.token,
_PR: args.pr.toString(),
_PR_BRANCH: prData.head.ref,
_OWNER: owner,
_REPOSITORY: repo,
_PR_OWNER: prOwner,
_PR_REPOSITORY: prRepo,
// _CONTAINER must contain the image digest. For example:
// gcr.io/repo-automation-tools/nodejs-post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
| f (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis"
* @param repo the rep name; ex: "nodejs-vision"
* @param path the file path within the repo; ex: ".github/.OwlBot.lock.yaml"
* @param ref the commit hash
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function getFileContent(
owner: string,
repo: string,
path: string,
ref: string,
octokit: Octokit
): Promise<string | undefined> {
try {
const data = (
await octokit.repos.getContent({
owner,
repo,
path,
ref,
})
).data as {content: string | undefined; encoding: string};
if (!data.content) {
return undefined;
}
if (data.encoding !== 'base64') {
throw Error(`unexpected encoding ${data.encoding} in ${owner}/${repo}`);
}
const text = Buffer.from(data.content, 'base64').toString('utf8');
return text;
} catch (e) {
const err = e as RequestError;
if (err.status === 404) return undefined;
else throw err;
}
}
/**
* Given a git repository and sha, returns the files modified by the
* given commit.
* @param path path to git repository on disk.
* @param sha commit to list modified files for.
* @returns a list of file paths.
*/
export async function getFilesModifiedBySha(
path: string,
sha: string
): Promise<string[]> {
// --no-renames to avoid
// warning: inexact rename detection was skipped due to too many files.
const out = await execAsync(`git show --name-only --no-renames ${sha}`, {
cwd: path,
// Handle 100,000+ files changing:
maxBuffer: 1024 * 1024 * 512,
});
if (out.stderr) throw Error(out.stderr);
const filesRaw = out.stdout.trim();
const files = [];
// We walk the output in reverse, since the file list is shown at the end
// of git show:
for (const file of filesRaw.split(/\r?\n/).reverse()) {
// There will be a blank line between the commit message and the
// files list, we use this as a stopping point:
if (file === '') break;
files.push(file);
}
return files;
}
/**
* Returns an iterator that returns the most recent commits added to a repository.
* @param repoFull org/repo
* @param octokit authenticated octokit instance.
*/
export async function* commitsIterator(
repoFull: string,
octokit: Octokit,
per_page = 25
) {
const [owner, repo] = repoFull.split('/');
for await (const response of octokit.paginate.iterator(
octokit.repos.listCommits,
{
owner,
repo,
per_page,
}
)) {
for (const commit of response.data) {
yield commit.sha;
}
}
}
/*
* Detect whether there's an update loop created by OwlBot post-processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for loop.
* @param octokit authenticated instance of octokit.
*/
async function hasOwlBotLoop(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
// If N (where N=circuitBreaker) commits are added to a pull-request
// by the post-processor one after another, this indicates that we're
// potentially looping, e.g., flip flopping a date between 2020 and 2021.
//
// It's okay to have 4 commits from Owl-Bot in a row, e.g., a commit for
// a code update plus the post processor.
//
// It's also okay to run the post-processor many more than circuitBreaker
// times on a long lived PR, with human edits being made.
const circuitBreaker = 5;
// TODO(bcoe): we should move to an async iterator for listCommits:
const commits = (
await octokit.pulls.listCommits({
pull_number: prNumber,
owner,
repo,
per_page: 100,
})
).data;
// get the most recent commits (limit by circuit breaker)
const lastFewCommits = commits
.sort((a, b) => {
const aDate = new Date(a.commit.author?.date || 0);
const bDate = new Date(b.commit.author?.date || 0);
// sort desc
return bDate.valueOf() - aDate.valueOf();
})
.slice(0, circuitBreaker);
// not enough commits to trigger a circuit breaker
if (lastFewCommits.length < circuitBreaker) return false;
for (const commit of lastFewCommits) {
if (
!commit.commit.message.includes(
OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER
)
)
return false;
}
// all of the recent commits were from owl-bot
return true;
}
/*
* Return whether or not the last commit was from OwlBot post processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for commit.
* @param octokit authenticated instance of octokit.
* @returns Promise was the last commit from OwlBot?
*/
async function lastCommitFromOwlBotPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
const commitMessages: Array<string> = [];
for await (const response of octokit.paginate.iterator(
octokit.rest.pulls.listCommits,
{
pull_number: prNumber,
owner,
repo,
per_page: 100,
}
)) {
for (const {commit} of response.data) {
commitMessages.push(commit.message);
}
}
const message = commitMessages[commitMessages.length - 1];
return message.includes(OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER);
}
/**
* After the post processor runs, we may want to close the pull request or
* promote it to "ready for review."
*/
async function updatePullRequestAfterPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit,
logger: GCFLogger = defaultLogger
): Promise<void> {
const {data: pull} = await octokit.pulls.get({
owner,
repo,
pull_number: prNumber,
});
// If someone asked owl bot to ignore this PR, never close or promote it.
if (pull.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return;
}
// If the pull request was not created by owl bot, never close or promote it.
const owlBotLabels = [OWL_BOT_LOCK_UPDATE, OWL_BOT_COPY];
if (!pull.labels.find(label => owlBotLabels.indexOf(label.name ?? '') >= 0)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's not labeled with ${owlBotLabels}.`
);
return;
}
// If running post-processor has created a noop change, close the
// pull request:
const files = (
await octokit.pulls.listFiles({
owner,
repo,
pull_number: prNumber,
})
).data;
if (!files.length) {
logger.info(
`Closing pull request ${pull.html_url} because listFiles() returned empty.`
);
await octokit.pulls.update({
owner,
repo,
pull_number: prNumber,
state: 'closed',
});
if (!pull?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${prNumber}`);
if (pull.head.repo.full_name === `${owner}/${repo}`) {
logger.info(`Deleting branch ${pull.head.ref}`);
await octokit.git.deleteRef({owner, repo, ref: `heads/${pull.head.ref}`});
} else {
logger.info(
`I won't delete the ${pull.head.ref} branch in the fork ` +
`${pull.head.repo.full_name}`
);
}
}
}
export interface RegenerateArgs {
owner: string;
repo: string;
branch: string;
prNumber: number;
gcpProjectId: string;
buildTriggerId: string;
action: CopyCodeIntoPullRequestAction;
}
export async function triggerRegeneratePullRequest(
octokitFactory: OctokitFactory,
args: RegenerateArgs
): Promise<void> {
const token = await octokitFactory.getGitHubShortLivedAccessToken();
const octokit = await octokitFactory.getShortLivedOctokit(token);
// No matter what the outcome, we'll create a comment below.
const _createComment = async (body: string): Promise<void> => {
await octokit.issues.createComment({
owner: args.owner,
repo: args.repo,
issue_number: args.prNumber,
body,
});
};
const reportError = (error: string) => {
console.error(error);
return _createComment(error);
};
const reportInfo = (text: string) => {
console.info(text);
return _createComment(text);
};
// The user checked the "Regenerate this pull request" box.
let buildName = '';
try {
const cb = core.getCloudBuildInstance();
// Is there a reason to wait for for the long-running build to complete
// here?
const [resp] = await cb.runBuildTrigger({
projectId: args.gcpProjectId,
triggerId: args.buildTriggerId,
source: {
projectId: args.gcpProjectId,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token,
_PR: args.prNumber.toString(),
_PR_BRANCH: args.branch,
_PR_OWNER: args.owner,
_REPOSITORY: args.repo,
_ACTION: args.action,
},
},
});
buildName = resp?.name ?? '';
} catch (err) {
await reportError(`Owl Bot failed to regenerate pull request ${args.prNumber}.
${err}`);
return;
}
await reportInfo(`Owl bot is regenerating pull request ${args.prNumber}...
Build name: ${stripBuildName(buildName)}`);
}
/**
* The build name returned by runBuildTrigger includes a full path with the
* project name, and I'd rather not show that to the world.
*/
function stripBuildName(buildName: string): string {
const chunks = buildName.split(/\//);
return chunks.length > 0 ? chunks[chunks.length - 1] : '';
}
export const core = {
commitsIterator,
createCheck,
getAccessTokenURL,
getAuthenticatedOctokit,
getCloudBuildInstance,
getFilesModifiedBySha,
getFileContent,
getGitHubShortLivedAccessToken,
fetchOwlBotLock,
parseOwlBotLock,
hasOwlBotLoop,
lastCommitFromOwlBotPostProcessor,
OWL_BOT_LOCK_PATH,
triggerPostProcessBuild,
triggerRegeneratePullRequest,
updatePullRequestAfterPostProcessor,
OWL_BOT_LOCK_UPDATE: OWL_BOT_LOCK_UPDATE,
};
| conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
i | conditional_block |
core.ts | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {exec} from 'child_process';
import {promisify} from 'util';
const execAsync = promisify(exec);
import {load} from 'js-yaml';
import {logger as defaultLogger, GCFLogger} from 'gcf-utils';
import {sign} from 'jsonwebtoken';
import {request} from 'gaxios';
import {CloudBuildClient} from '@google-cloud/cloudbuild';
import {Octokit} from '@octokit/rest';
// eslint-disable-next-line node/no-extraneous-import
import {RequestError} from '@octokit/types';
// eslint-disable-next-line node/no-extraneous-import
import {OwlBotLock, OWL_BOT_LOCK_PATH, owlBotLockFrom} from './config-files';
import {OctokitFactory} from './octokit-util';
import {OWL_BOT_IGNORE} from './labels';
import {OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER} from './constants';
import {CopyCodeIntoPullRequestAction} from './copy-code';
import {google} from '@google-cloud/cloudbuild/build/protos/protos';
interface BuildArgs {
image: string;
privateKey: string;
appId: number;
installation: number;
repo: string;
pr: number;
project?: string;
trigger: string;
defaultBranch?: string;
}
export interface CheckArgs {
privateKey: string;
appId: number;
installation: number;
pr: number;
repo: string;
summary: string;
conclusion: 'success' | 'failure';
detailsURL: string;
text: string;
title: string;
}
interface AuthArgs {
privateKey: string;
appId: number;
installation: number;
}
interface BuildSummary {
conclusion: 'success' | 'failure';
summary: string;
text: string;
}
interface BuildResponse extends BuildSummary {
detailsURL: string;
}
interface Commit {
sha: string;
}
interface Token {
token: string;
expires_at: string;
permissions: object;
repository_selection: string;
}
export const OWL_BOT_LOCK_UPDATE = 'owl-bot-update-lock';
export const OWL_BOT_COPY = 'owl-bot-copy';
// Check back on the build every 1/3 of a minute (20000ms)
const PING_DELAY = 20000;
// 60 min * 3 hours * 3 * 1/3s of a minute (3 hours)
const TOTAL_PINGS = 3 * 60 * 3;
export async function triggerPostProcessBuild(
args: BuildArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
): Promise<BuildResponse | null> {
const token = await core.getGitHubShortLivedAccessToken(
args.privateKey,
args.appId,
args.installation
);
const project = args.project || process.env.PROJECT_ID;
if (!project) {
throw Error('gcloud project must be provided');
}
const [owner, repo] = args.repo.split('/');
if (!octokit) {
octokit = await core.getAuthenticatedOctokit(token.token);
}
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: args.pr,
});
// See if someone asked owl bot to ignore this PR.
if (prData.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`Ignoring ${owner}/${repo} #${args.pr} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return null;
}
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${args.pr}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const cb = core.getCloudBuildInstance();
const [resp] = await cb.runBuildTrigger({
projectId: project,
triggerId: args.trigger,
source: {
projectId: project,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token.token,
_PR: args.pr.toString(),
_PR_BRANCH: prData.head.ref,
_OWNER: owner,
_REPOSITORY: repo,
_PR_OWNER: prOwner,
_PR_REPOSITORY: prRepo,
// _CONTAINER must contain the image digest. For example:
// gcr.io/repo-automation-tools/nodejs-post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cac | getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis"
* @param repo the rep name; ex: "nodejs-vision"
* @param path the file path within the repo; ex: ".github/.OwlBot.lock.yaml"
* @param ref the commit hash
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function getFileContent(
owner: string,
repo: string,
path: string,
ref: string,
octokit: Octokit
): Promise<string | undefined> {
try {
const data = (
await octokit.repos.getContent({
owner,
repo,
path,
ref,
})
).data as {content: string | undefined; encoding: string};
if (!data.content) {
return undefined;
}
if (data.encoding !== 'base64') {
throw Error(`unexpected encoding ${data.encoding} in ${owner}/${repo}`);
}
const text = Buffer.from(data.content, 'base64').toString('utf8');
return text;
} catch (e) {
const err = e as RequestError;
if (err.status === 404) return undefined;
else throw err;
}
}
/**
* Given a git repository and sha, returns the files modified by the
* given commit.
* @param path path to git repository on disk.
* @param sha commit to list modified files for.
* @returns a list of file paths.
*/
export async function getFilesModifiedBySha(
path: string,
sha: string
): Promise<string[]> {
// --no-renames to avoid
// warning: inexact rename detection was skipped due to too many files.
const out = await execAsync(`git show --name-only --no-renames ${sha}`, {
cwd: path,
// Handle 100,000+ files changing:
maxBuffer: 1024 * 1024 * 512,
});
if (out.stderr) throw Error(out.stderr);
const filesRaw = out.stdout.trim();
const files = [];
// We walk the output in reverse, since the file list is shown at the end
// of git show:
for (const file of filesRaw.split(/\r?\n/).reverse()) {
// There will be a blank line between the commit message and the
// files list, we use this as a stopping point:
if (file === '') break;
files.push(file);
}
return files;
}
/**
* Returns an iterator that returns the most recent commits added to a repository.
* @param repoFull org/repo
* @param octokit authenticated octokit instance.
*/
export async function* commitsIterator(
repoFull: string,
octokit: Octokit,
per_page = 25
) {
const [owner, repo] = repoFull.split('/');
for await (const response of octokit.paginate.iterator(
octokit.repos.listCommits,
{
owner,
repo,
per_page,
}
)) {
for (const commit of response.data) {
yield commit.sha;
}
}
}
/*
* Detect whether there's an update loop created by OwlBot post-processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for loop.
* @param octokit authenticated instance of octokit.
*/
async function hasOwlBotLoop(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
// If N (where N=circuitBreaker) commits are added to a pull-request
// by the post-processor one after another, this indicates that we're
// potentially looping, e.g., flip flopping a date between 2020 and 2021.
//
// It's okay to have 4 commits from Owl-Bot in a row, e.g., a commit for
// a code update plus the post processor.
//
// It's also okay to run the post-processor many more than circuitBreaker
// times on a long lived PR, with human edits being made.
const circuitBreaker = 5;
// TODO(bcoe): we should move to an async iterator for listCommits:
const commits = (
await octokit.pulls.listCommits({
pull_number: prNumber,
owner,
repo,
per_page: 100,
})
).data;
// get the most recent commits (limit by circuit breaker)
const lastFewCommits = commits
.sort((a, b) => {
const aDate = new Date(a.commit.author?.date || 0);
const bDate = new Date(b.commit.author?.date || 0);
// sort desc
return bDate.valueOf() - aDate.valueOf();
})
.slice(0, circuitBreaker);
// not enough commits to trigger a circuit breaker
if (lastFewCommits.length < circuitBreaker) return false;
for (const commit of lastFewCommits) {
if (
!commit.commit.message.includes(
OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER
)
)
return false;
}
// all of the recent commits were from owl-bot
return true;
}
/*
* Return whether or not the last commit was from OwlBot post processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for commit.
* @param octokit authenticated instance of octokit.
* @returns Promise was the last commit from OwlBot?
*/
async function lastCommitFromOwlBotPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
const commitMessages: Array<string> = [];
for await (const response of octokit.paginate.iterator(
octokit.rest.pulls.listCommits,
{
pull_number: prNumber,
owner,
repo,
per_page: 100,
}
)) {
for (const {commit} of response.data) {
commitMessages.push(commit.message);
}
}
const message = commitMessages[commitMessages.length - 1];
return message.includes(OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER);
}
/**
* After the post processor runs, we may want to close the pull request or
* promote it to "ready for review."
*/
async function updatePullRequestAfterPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit,
logger: GCFLogger = defaultLogger
): Promise<void> {
const {data: pull} = await octokit.pulls.get({
owner,
repo,
pull_number: prNumber,
});
// If someone asked owl bot to ignore this PR, never close or promote it.
if (pull.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return;
}
// If the pull request was not created by owl bot, never close or promote it.
const owlBotLabels = [OWL_BOT_LOCK_UPDATE, OWL_BOT_COPY];
if (!pull.labels.find(label => owlBotLabels.indexOf(label.name ?? '') >= 0)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's not labeled with ${owlBotLabels}.`
);
return;
}
// If running post-processor has created a noop change, close the
// pull request:
const files = (
await octokit.pulls.listFiles({
owner,
repo,
pull_number: prNumber,
})
).data;
if (!files.length) {
logger.info(
`Closing pull request ${pull.html_url} because listFiles() returned empty.`
);
await octokit.pulls.update({
owner,
repo,
pull_number: prNumber,
state: 'closed',
});
if (!pull?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${prNumber}`);
if (pull.head.repo.full_name === `${owner}/${repo}`) {
logger.info(`Deleting branch ${pull.head.ref}`);
await octokit.git.deleteRef({owner, repo, ref: `heads/${pull.head.ref}`});
} else {
logger.info(
`I won't delete the ${pull.head.ref} branch in the fork ` +
`${pull.head.repo.full_name}`
);
}
}
}
export interface RegenerateArgs {
owner: string;
repo: string;
branch: string;
prNumber: number;
gcpProjectId: string;
buildTriggerId: string;
action: CopyCodeIntoPullRequestAction;
}
export async function triggerRegeneratePullRequest(
octokitFactory: OctokitFactory,
args: RegenerateArgs
): Promise<void> {
const token = await octokitFactory.getGitHubShortLivedAccessToken();
const octokit = await octokitFactory.getShortLivedOctokit(token);
// No matter what the outcome, we'll create a comment below.
const _createComment = async (body: string): Promise<void> => {
await octokit.issues.createComment({
owner: args.owner,
repo: args.repo,
issue_number: args.prNumber,
body,
});
};
const reportError = (error: string) => {
console.error(error);
return _createComment(error);
};
const reportInfo = (text: string) => {
console.info(text);
return _createComment(text);
};
// The user checked the "Regenerate this pull request" box.
let buildName = '';
try {
const cb = core.getCloudBuildInstance();
// Is there a reason to wait for for the long-running build to complete
// here?
const [resp] = await cb.runBuildTrigger({
projectId: args.gcpProjectId,
triggerId: args.buildTriggerId,
source: {
projectId: args.gcpProjectId,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token,
_PR: args.prNumber.toString(),
_PR_BRANCH: args.branch,
_PR_OWNER: args.owner,
_REPOSITORY: args.repo,
_ACTION: args.action,
},
},
});
buildName = resp?.name ?? '';
} catch (err) {
await reportError(`Owl Bot failed to regenerate pull request ${args.prNumber}.
${err}`);
return;
}
await reportInfo(`Owl bot is regenerating pull request ${args.prNumber}...
Build name: ${stripBuildName(buildName)}`);
}
/**
* The build name returned by runBuildTrigger includes a full path with the
* project name, and I'd rather not show that to the world.
*/
function stripBuildName(buildName: string): string {
const chunks = buildName.split(/\//);
return chunks.length > 0 ? chunks[chunks.length - 1] : '';
}
export const core = {
commitsIterator,
createCheck,
getAccessTokenURL,
getAuthenticatedOctokit,
getCloudBuildInstance,
getFilesModifiedBySha,
getFileContent,
getGitHubShortLivedAccessToken,
fetchOwlBotLock,
parseOwlBotLock,
hasOwlBotLoop,
lastCommitFromOwlBotPostProcessor,
OWL_BOT_LOCK_PATH,
triggerPostProcessBuild,
triggerRegeneratePullRequest,
updatePullRequestAfterPostProcessor,
OWL_BOT_LOCK_UPDATE: OWL_BOT_LOCK_UPDATE,
};
| he && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function | identifier_body |
core.ts | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {exec} from 'child_process';
import {promisify} from 'util';
const execAsync = promisify(exec);
import {load} from 'js-yaml';
import {logger as defaultLogger, GCFLogger} from 'gcf-utils';
import {sign} from 'jsonwebtoken';
import {request} from 'gaxios';
import {CloudBuildClient} from '@google-cloud/cloudbuild';
import {Octokit} from '@octokit/rest';
// eslint-disable-next-line node/no-extraneous-import
import {RequestError} from '@octokit/types';
// eslint-disable-next-line node/no-extraneous-import
import {OwlBotLock, OWL_BOT_LOCK_PATH, owlBotLockFrom} from './config-files';
import {OctokitFactory} from './octokit-util';
import {OWL_BOT_IGNORE} from './labels';
import {OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER} from './constants';
import {CopyCodeIntoPullRequestAction} from './copy-code';
import {google} from '@google-cloud/cloudbuild/build/protos/protos';
interface BuildArgs {
image: string;
privateKey: string;
appId: number;
installation: number;
repo: string;
pr: number;
project?: string;
trigger: string;
defaultBranch?: string;
}
export interface CheckArgs {
privateKey: string;
appId: number;
installation: number;
pr: number;
repo: string;
summary: string;
conclusion: 'success' | 'failure';
detailsURL: string;
text: string;
title: string;
}
interface AuthArgs {
privateKey: string;
appId: number;
installation: number;
}
interface BuildSummary {
conclusion: 'success' | 'failure';
summary: string;
text: string;
}
interface BuildResponse extends BuildSummary {
detailsURL: string;
}
interface Commit {
sha: string;
}
interface Token {
token: string;
expires_at: string;
permissions: object;
repository_selection: string;
}
export const OWL_BOT_LOCK_UPDATE = 'owl-bot-update-lock';
export const OWL_BOT_COPY = 'owl-bot-copy';
// Check back on the build every 1/3 of a minute (20000ms)
const PING_DELAY = 20000;
// 60 min * 3 hours * 3 * 1/3s of a minute (3 hours)
const TOTAL_PINGS = 3 * 60 * 3;
export async function triggerPostProcessBuild(
args: BuildArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
): Promise<BuildResponse | null> {
const token = await core.getGitHubShortLivedAccessToken(
args.privateKey,
args.appId,
args.installation
);
const project = args.project || process.env.PROJECT_ID;
if (!project) {
throw Error('gcloud project must be provided');
}
const [owner, repo] = args.repo.split('/');
if (!octokit) {
octokit = await core.getAuthenticatedOctokit(token.token);
}
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: args.pr,
});
// See if someone asked owl bot to ignore this PR.
if (prData.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`Ignoring ${owner}/${repo} #${args.pr} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return null;
}
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${args.pr}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const cb = core.getCloudBuildInstance();
const [resp] = await cb.runBuildTrigger({
projectId: project,
triggerId: args.trigger,
source: {
projectId: project,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token.token,
_PR: args.pr.toString(),
_PR_BRANCH: prData.head.ref,
_OWNER: owner,
_REPOSITORY: repo,
_PR_OWNER: prOwner,
_PR_REPOSITORY: prRepo,
// _CONTAINER must contain the image digest. For example:
// gcr.io/repo-automation-tools/nodejs-post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id; | const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis"
* @param repo the rep name; ex: "nodejs-vision"
* @param path the file path within the repo; ex: ".github/.OwlBot.lock.yaml"
* @param ref the commit hash
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function getFileContent(
owner: string,
repo: string,
path: string,
ref: string,
octokit: Octokit
): Promise<string | undefined> {
try {
const data = (
await octokit.repos.getContent({
owner,
repo,
path,
ref,
})
).data as {content: string | undefined; encoding: string};
if (!data.content) {
return undefined;
}
if (data.encoding !== 'base64') {
throw Error(`unexpected encoding ${data.encoding} in ${owner}/${repo}`);
}
const text = Buffer.from(data.content, 'base64').toString('utf8');
return text;
} catch (e) {
const err = e as RequestError;
if (err.status === 404) return undefined;
else throw err;
}
}
/**
* Given a git repository and sha, returns the files modified by the
* given commit.
* @param path path to git repository on disk.
* @param sha commit to list modified files for.
* @returns a list of file paths.
*/
export async function getFilesModifiedBySha(
path: string,
sha: string
): Promise<string[]> {
// --no-renames to avoid
// warning: inexact rename detection was skipped due to too many files.
const out = await execAsync(`git show --name-only --no-renames ${sha}`, {
cwd: path,
// Handle 100,000+ files changing:
maxBuffer: 1024 * 1024 * 512,
});
if (out.stderr) throw Error(out.stderr);
const filesRaw = out.stdout.trim();
const files = [];
// We walk the output in reverse, since the file list is shown at the end
// of git show:
for (const file of filesRaw.split(/\r?\n/).reverse()) {
// There will be a blank line between the commit message and the
// files list, we use this as a stopping point:
if (file === '') break;
files.push(file);
}
return files;
}
/**
* Returns an iterator that returns the most recent commits added to a repository.
* @param repoFull org/repo
* @param octokit authenticated octokit instance.
*/
export async function* commitsIterator(
repoFull: string,
octokit: Octokit,
per_page = 25
) {
const [owner, repo] = repoFull.split('/');
for await (const response of octokit.paginate.iterator(
octokit.repos.listCommits,
{
owner,
repo,
per_page,
}
)) {
for (const commit of response.data) {
yield commit.sha;
}
}
}
/*
* Detect whether there's an update loop created by OwlBot post-processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for loop.
* @param octokit authenticated instance of octokit.
*/
async function hasOwlBotLoop(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
// If N (where N=circuitBreaker) commits are added to a pull-request
// by the post-processor one after another, this indicates that we're
// potentially looping, e.g., flip flopping a date between 2020 and 2021.
//
// It's okay to have 4 commits from Owl-Bot in a row, e.g., a commit for
// a code update plus the post processor.
//
// It's also okay to run the post-processor many more than circuitBreaker
// times on a long lived PR, with human edits being made.
const circuitBreaker = 5;
// TODO(bcoe): we should move to an async iterator for listCommits:
const commits = (
await octokit.pulls.listCommits({
pull_number: prNumber,
owner,
repo,
per_page: 100,
})
).data;
// get the most recent commits (limit by circuit breaker)
const lastFewCommits = commits
.sort((a, b) => {
const aDate = new Date(a.commit.author?.date || 0);
const bDate = new Date(b.commit.author?.date || 0);
// sort desc
return bDate.valueOf() - aDate.valueOf();
})
.slice(0, circuitBreaker);
// not enough commits to trigger a circuit breaker
if (lastFewCommits.length < circuitBreaker) return false;
for (const commit of lastFewCommits) {
if (
!commit.commit.message.includes(
OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER
)
)
return false;
}
// all of the recent commits were from owl-bot
return true;
}
/*
* Return whether or not the last commit was from OwlBot post processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for commit.
* @param octokit authenticated instance of octokit.
* @returns Promise was the last commit from OwlBot?
*/
async function lastCommitFromOwlBotPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
const commitMessages: Array<string> = [];
for await (const response of octokit.paginate.iterator(
octokit.rest.pulls.listCommits,
{
pull_number: prNumber,
owner,
repo,
per_page: 100,
}
)) {
for (const {commit} of response.data) {
commitMessages.push(commit.message);
}
}
const message = commitMessages[commitMessages.length - 1];
return message.includes(OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER);
}
/**
* After the post processor runs, we may want to close the pull request or
* promote it to "ready for review."
*/
async function updatePullRequestAfterPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit,
logger: GCFLogger = defaultLogger
): Promise<void> {
const {data: pull} = await octokit.pulls.get({
owner,
repo,
pull_number: prNumber,
});
// If someone asked owl bot to ignore this PR, never close or promote it.
if (pull.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return;
}
// If the pull request was not created by owl bot, never close or promote it.
const owlBotLabels = [OWL_BOT_LOCK_UPDATE, OWL_BOT_COPY];
if (!pull.labels.find(label => owlBotLabels.indexOf(label.name ?? '') >= 0)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's not labeled with ${owlBotLabels}.`
);
return;
}
// If running post-processor has created a noop change, close the
// pull request:
const files = (
await octokit.pulls.listFiles({
owner,
repo,
pull_number: prNumber,
})
).data;
if (!files.length) {
logger.info(
`Closing pull request ${pull.html_url} because listFiles() returned empty.`
);
await octokit.pulls.update({
owner,
repo,
pull_number: prNumber,
state: 'closed',
});
if (!pull?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${prNumber}`);
if (pull.head.repo.full_name === `${owner}/${repo}`) {
logger.info(`Deleting branch ${pull.head.ref}`);
await octokit.git.deleteRef({owner, repo, ref: `heads/${pull.head.ref}`});
} else {
logger.info(
`I won't delete the ${pull.head.ref} branch in the fork ` +
`${pull.head.repo.full_name}`
);
}
}
}
export interface RegenerateArgs {
owner: string;
repo: string;
branch: string;
prNumber: number;
gcpProjectId: string;
buildTriggerId: string;
action: CopyCodeIntoPullRequestAction;
}
export async function triggerRegeneratePullRequest(
octokitFactory: OctokitFactory,
args: RegenerateArgs
): Promise<void> {
const token = await octokitFactory.getGitHubShortLivedAccessToken();
const octokit = await octokitFactory.getShortLivedOctokit(token);
// No matter what the outcome, we'll create a comment below.
const _createComment = async (body: string): Promise<void> => {
await octokit.issues.createComment({
owner: args.owner,
repo: args.repo,
issue_number: args.prNumber,
body,
});
};
const reportError = (error: string) => {
console.error(error);
return _createComment(error);
};
const reportInfo = (text: string) => {
console.info(text);
return _createComment(text);
};
// The user checked the "Regenerate this pull request" box.
let buildName = '';
try {
const cb = core.getCloudBuildInstance();
// Is there a reason to wait for for the long-running build to complete
// here?
const [resp] = await cb.runBuildTrigger({
projectId: args.gcpProjectId,
triggerId: args.buildTriggerId,
source: {
projectId: args.gcpProjectId,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token,
_PR: args.prNumber.toString(),
_PR_BRANCH: args.branch,
_PR_OWNER: args.owner,
_REPOSITORY: args.repo,
_ACTION: args.action,
},
},
});
buildName = resp?.name ?? '';
} catch (err) {
await reportError(`Owl Bot failed to regenerate pull request ${args.prNumber}.
${err}`);
return;
}
await reportInfo(`Owl bot is regenerating pull request ${args.prNumber}...
Build name: ${stripBuildName(buildName)}`);
}
/**
* The build name returned by runBuildTrigger includes a full path with the
* project name, and I'd rather not show that to the world.
*/
function stripBuildName(buildName: string): string {
const chunks = buildName.split(/\//);
return chunks.length > 0 ? chunks[chunks.length - 1] : '';
}
export const core = {
commitsIterator,
createCheck,
getAccessTokenURL,
getAuthenticatedOctokit,
getCloudBuildInstance,
getFilesModifiedBySha,
getFileContent,
getGitHubShortLivedAccessToken,
fetchOwlBotLock,
parseOwlBotLock,
hasOwlBotLoop,
lastCommitFromOwlBotPostProcessor,
OWL_BOT_LOCK_PATH,
triggerPostProcessBuild,
triggerRegeneratePullRequest,
updatePullRequestAfterPostProcessor,
OWL_BOT_LOCK_UPDATE: OWL_BOT_LOCK_UPDATE,
}; | const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise(); | random_line_split |
core.ts | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import {exec} from 'child_process';
import {promisify} from 'util';
const execAsync = promisify(exec);
import {load} from 'js-yaml';
import {logger as defaultLogger, GCFLogger} from 'gcf-utils';
import {sign} from 'jsonwebtoken';
import {request} from 'gaxios';
import {CloudBuildClient} from '@google-cloud/cloudbuild';
import {Octokit} from '@octokit/rest';
// eslint-disable-next-line node/no-extraneous-import
import {RequestError} from '@octokit/types';
// eslint-disable-next-line node/no-extraneous-import
import {OwlBotLock, OWL_BOT_LOCK_PATH, owlBotLockFrom} from './config-files';
import {OctokitFactory} from './octokit-util';
import {OWL_BOT_IGNORE} from './labels';
import {OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER} from './constants';
import {CopyCodeIntoPullRequestAction} from './copy-code';
import {google} from '@google-cloud/cloudbuild/build/protos/protos';
interface BuildArgs {
image: string;
privateKey: string;
appId: number;
installation: number;
repo: string;
pr: number;
project?: string;
trigger: string;
defaultBranch?: string;
}
export interface CheckArgs {
privateKey: string;
appId: number;
installation: number;
pr: number;
repo: string;
summary: string;
conclusion: 'success' | 'failure';
detailsURL: string;
text: string;
title: string;
}
interface AuthArgs {
privateKey: string;
appId: number;
installation: number;
}
interface BuildSummary {
conclusion: 'success' | 'failure';
summary: string;
text: string;
}
interface BuildResponse extends BuildSummary {
detailsURL: string;
}
interface Commit {
sha: string;
}
interface Token {
token: string;
expires_at: string;
permissions: object;
repository_selection: string;
}
export const OWL_BOT_LOCK_UPDATE = 'owl-bot-update-lock';
export const OWL_BOT_COPY = 'owl-bot-copy';
// Check back on the build every 1/3 of a minute (20000ms)
const PING_DELAY = 20000;
// 60 min * 3 hours * 3 * 1/3s of a minute (3 hours)
const TOTAL_PINGS = 3 * 60 * 3;
export async function triggerPostProcessBuild(
args: BuildArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
): Promise<BuildResponse | null> {
const token = await core.getGitHubShortLivedAccessToken(
args.privateKey,
args.appId,
args.installation
);
const project = args.project || process.env.PROJECT_ID;
if (!project) {
throw Error('gcloud project must be provided');
}
const [owner, repo] = args.repo.split('/');
if (!octokit) {
octokit = await core.getAuthenticatedOctokit(token.token);
}
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: args.pr,
});
// See if someone asked owl bot to ignore this PR.
if (prData.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`Ignoring ${owner}/${repo} #${args.pr} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return null;
}
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${args.pr}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const cb = core.getCloudBuildInstance();
const [resp] = await cb.runBuildTrigger({
projectId: project,
triggerId: args.trigger,
source: {
projectId: project,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token.token,
_PR: args.pr.toString(),
_PR_BRANCH: prData.head.ref,
_OWNER: owner,
_REPOSITORY: repo,
_PR_OWNER: prOwner,
_PR_REPOSITORY: prRepo,
// _CONTAINER must contain the image digest. For example:
// gcr.io/repo-automation-tools/nodejs-post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubSh | Key: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis"
* @param repo the rep name; ex: "nodejs-vision"
* @param path the file path within the repo; ex: ".github/.OwlBot.lock.yaml"
* @param ref the commit hash
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function getFileContent(
owner: string,
repo: string,
path: string,
ref: string,
octokit: Octokit
): Promise<string | undefined> {
try {
const data = (
await octokit.repos.getContent({
owner,
repo,
path,
ref,
})
).data as {content: string | undefined; encoding: string};
if (!data.content) {
return undefined;
}
if (data.encoding !== 'base64') {
throw Error(`unexpected encoding ${data.encoding} in ${owner}/${repo}`);
}
const text = Buffer.from(data.content, 'base64').toString('utf8');
return text;
} catch (e) {
const err = e as RequestError;
if (err.status === 404) return undefined;
else throw err;
}
}
/**
* Given a git repository and sha, returns the files modified by the
* given commit.
* @param path path to git repository on disk.
* @param sha commit to list modified files for.
* @returns a list of file paths.
*/
export async function getFilesModifiedBySha(
path: string,
sha: string
): Promise<string[]> {
// --no-renames to avoid
// warning: inexact rename detection was skipped due to too many files.
const out = await execAsync(`git show --name-only --no-renames ${sha}`, {
cwd: path,
// Handle 100,000+ files changing:
maxBuffer: 1024 * 1024 * 512,
});
if (out.stderr) throw Error(out.stderr);
const filesRaw = out.stdout.trim();
const files = [];
// We walk the output in reverse, since the file list is shown at the end
// of git show:
for (const file of filesRaw.split(/\r?\n/).reverse()) {
// There will be a blank line between the commit message and the
// files list, we use this as a stopping point:
if (file === '') break;
files.push(file);
}
return files;
}
/**
* Returns an iterator that returns the most recent commits added to a repository.
* @param repoFull org/repo
* @param octokit authenticated octokit instance.
*/
export async function* commitsIterator(
repoFull: string,
octokit: Octokit,
per_page = 25
) {
const [owner, repo] = repoFull.split('/');
for await (const response of octokit.paginate.iterator(
octokit.repos.listCommits,
{
owner,
repo,
per_page,
}
)) {
for (const commit of response.data) {
yield commit.sha;
}
}
}
/*
* Detect whether there's an update loop created by OwlBot post-processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for loop.
* @param octokit authenticated instance of octokit.
*/
async function hasOwlBotLoop(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
// If N (where N=circuitBreaker) commits are added to a pull-request
// by the post-processor one after another, this indicates that we're
// potentially looping, e.g., flip flopping a date between 2020 and 2021.
//
// It's okay to have 4 commits from Owl-Bot in a row, e.g., a commit for
// a code update plus the post processor.
//
// It's also okay to run the post-processor many more than circuitBreaker
// times on a long lived PR, with human edits being made.
const circuitBreaker = 5;
// TODO(bcoe): we should move to an async iterator for listCommits:
const commits = (
await octokit.pulls.listCommits({
pull_number: prNumber,
owner,
repo,
per_page: 100,
})
).data;
// get the most recent commits (limit by circuit breaker)
const lastFewCommits = commits
.sort((a, b) => {
const aDate = new Date(a.commit.author?.date || 0);
const bDate = new Date(b.commit.author?.date || 0);
// sort desc
return bDate.valueOf() - aDate.valueOf();
})
.slice(0, circuitBreaker);
// not enough commits to trigger a circuit breaker
if (lastFewCommits.length < circuitBreaker) return false;
for (const commit of lastFewCommits) {
if (
!commit.commit.message.includes(
OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER
)
)
return false;
}
// all of the recent commits were from owl-bot
return true;
}
/*
* Return whether or not the last commit was from OwlBot post processor.
*
* @param owner owner of repo.
* @param repo short repo name.
* @param prNumber PR to check for commit.
* @param octokit authenticated instance of octokit.
* @returns Promise was the last commit from OwlBot?
*/
async function lastCommitFromOwlBotPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit
): Promise<boolean> {
const commitMessages: Array<string> = [];
for await (const response of octokit.paginate.iterator(
octokit.rest.pulls.listCommits,
{
pull_number: prNumber,
owner,
repo,
per_page: 100,
}
)) {
for (const {commit} of response.data) {
commitMessages.push(commit.message);
}
}
const message = commitMessages[commitMessages.length - 1];
return message.includes(OWL_BOT_POST_PROCESSOR_COMMIT_MESSAGE_MATCHER);
}
/**
* After the post processor runs, we may want to close the pull request or
* promote it to "ready for review."
*/
async function updatePullRequestAfterPostProcessor(
owner: string,
repo: string,
prNumber: number,
octokit: Octokit,
logger: GCFLogger = defaultLogger
): Promise<void> {
const {data: pull} = await octokit.pulls.get({
owner,
repo,
pull_number: prNumber,
});
// If someone asked owl bot to ignore this PR, never close or promote it.
if (pull.labels.find(label => label.name === OWL_BOT_IGNORE)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's labeled with ${OWL_BOT_IGNORE}.`
);
return;
}
// If the pull request was not created by owl bot, never close or promote it.
const owlBotLabels = [OWL_BOT_LOCK_UPDATE, OWL_BOT_COPY];
if (!pull.labels.find(label => owlBotLabels.indexOf(label.name ?? '') >= 0)) {
logger.info(
`I won't close or promote ${owner}/${repo} #${prNumber} because it's not labeled with ${owlBotLabels}.`
);
return;
}
// If running post-processor has created a noop change, close the
// pull request:
const files = (
await octokit.pulls.listFiles({
owner,
repo,
pull_number: prNumber,
})
).data;
if (!files.length) {
logger.info(
`Closing pull request ${pull.html_url} because listFiles() returned empty.`
);
await octokit.pulls.update({
owner,
repo,
pull_number: prNumber,
state: 'closed',
});
if (!pull?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${prNumber}`);
if (pull.head.repo.full_name === `${owner}/${repo}`) {
logger.info(`Deleting branch ${pull.head.ref}`);
await octokit.git.deleteRef({owner, repo, ref: `heads/${pull.head.ref}`});
} else {
logger.info(
`I won't delete the ${pull.head.ref} branch in the fork ` +
`${pull.head.repo.full_name}`
);
}
}
}
export interface RegenerateArgs {
owner: string;
repo: string;
branch: string;
prNumber: number;
gcpProjectId: string;
buildTriggerId: string;
action: CopyCodeIntoPullRequestAction;
}
export async function triggerRegeneratePullRequest(
octokitFactory: OctokitFactory,
args: RegenerateArgs
): Promise<void> {
const token = await octokitFactory.getGitHubShortLivedAccessToken();
const octokit = await octokitFactory.getShortLivedOctokit(token);
// No matter what the outcome, we'll create a comment below.
const _createComment = async (body: string): Promise<void> => {
await octokit.issues.createComment({
owner: args.owner,
repo: args.repo,
issue_number: args.prNumber,
body,
});
};
const reportError = (error: string) => {
console.error(error);
return _createComment(error);
};
const reportInfo = (text: string) => {
console.info(text);
return _createComment(text);
};
// The user checked the "Regenerate this pull request" box.
let buildName = '';
try {
const cb = core.getCloudBuildInstance();
// Is there a reason to wait for for the long-running build to complete
// here?
const [resp] = await cb.runBuildTrigger({
projectId: args.gcpProjectId,
triggerId: args.buildTriggerId,
source: {
projectId: args.gcpProjectId,
branchName: 'main', // TODO: It might fail if we change the default branch.
substitutions: {
_GITHUB_TOKEN: token,
_PR: args.prNumber.toString(),
_PR_BRANCH: args.branch,
_PR_OWNER: args.owner,
_REPOSITORY: args.repo,
_ACTION: args.action,
},
},
});
buildName = resp?.name ?? '';
} catch (err) {
await reportError(`Owl Bot failed to regenerate pull request ${args.prNumber}.
${err}`);
return;
}
await reportInfo(`Owl bot is regenerating pull request ${args.prNumber}...
Build name: ${stripBuildName(buildName)}`);
}
/**
* The build name returned by runBuildTrigger includes a full path with the
* project name, and I'd rather not show that to the world.
*/
function stripBuildName(buildName: string): string {
const chunks = buildName.split(/\//);
return chunks.length > 0 ? chunks[chunks.length - 1] : '';
}
export const core = {
commitsIterator,
createCheck,
getAccessTokenURL,
getAuthenticatedOctokit,
getCloudBuildInstance,
getFilesModifiedBySha,
getFileContent,
getGitHubShortLivedAccessToken,
fetchOwlBotLock,
parseOwlBotLock,
hasOwlBotLoop,
lastCommitFromOwlBotPostProcessor,
OWL_BOT_LOCK_PATH,
triggerPostProcessBuild,
triggerRegeneratePullRequest,
updatePullRequestAfterPostProcessor,
OWL_BOT_LOCK_UPDATE: OWL_BOT_LOCK_UPDATE,
};
| ortLivedAccessToken(
private | identifier_name |
main.py | import spacy
import get_input as gi
import csv
import os
import visualisierung as vi
from xml.etree import ElementTree
from matplotlib import pyplot as plt
from collections import Counter
spacy.cli.download("en_core_web_sm")
#Die obige Zeile downloaded "en_core_web_sm", nachdem man das gedownloaded hat, kann man es auskommentieren
nlp = spacy.load("en_core_web_sm")
def create_dir_for_saving_data(new_directory_name):
#Wir erstellen einen Ordner und geben dem User an, dass dieser erstellt wird
show_warnings = 1
show_info = 1
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
if show_info == 1:
print("Wir versuchen den Ordner '"+str(new_directory_name)+"' zu erstellen um dort den Output von diesem Programm zu speichern")
try:
os.mkdir(path)
print("Wir haben einen neuen Order mit dem Pfad:",path,"erstellt.")
except FileExistsError:
if show_warnings == 1:
print("Der Ordner '"+str(new_directory_name)+"' existiert bereits, wir werden ihn nicht erneut erstellen.")
print("Wir fahren wie geplant fort.")
except:
print("Ein unvorhergesehener Fehler ist aufgetreten.")
print("GGF. stimmt etwas mit dem Pfad für die Ordnererstellung nicht.")
def write_into_csv_file(new_directory_name, output_csv_filename, all_texts, output_count_pos_tags):
#Wir versehen jedes Wort in jeder der .xml Dateien mit dem von Spacy erkannten Part-of-Speech Tag
#Die Satzenden finden wir mit "doc.sents" und geben wir an den entsprechenden Stellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_trigg | ist_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit")
plt.savefig('Verteilung_der_satzlaenge.png', dpi=300, bbox_inches='tight')
#plt.show()
def do_part_2_2_vorverarbeitung(all_texts):
#Eine kleine Sub-funktione, welche den Output-Ordner erstellt und
#dafür sorgt, dass das die PoS-Tags gezählt und geschrieben werden und dabei
#zählen wir die Satzlängen und geben diese in einem dict zurück
create_dir_for_saving_data("output_data")
dict_with_sentence_lengths = write_into_csv_file("output_data", "output_text_with_pos", all_texts, "output_count_pos_tags")
return dict_with_sentence_lengths
def main():
input_data = gi.get_input_data()
all_texts = input_data[0]
dict_with_sentence_lengths = do_part_2_2_vorverarbeitung(all_texts)
abs_paths_to_xml_files = input_data[1]
#count the different tags for 2.3.b)
get_location_amount = count_locations(all_texts)
xml_tags = get_tags_from_xml(abs_paths_to_xml_files)
write_counted_tags_and_loc_into_csv(xml_tags[0], get_location_amount)
write_counted_qslink_types_into_csv(xml_tags[1])
write_counted_qslink_and_oslink_praep_word_triggers_into_csv(xml_tags[2])
write_counted_motion_verb_into_csv(xml_tags[3])
create_graph_for_sentence_lengths(dict_with_sentence_lengths)
vi.visualisierung_fuer_nummer_vier(abs_paths_to_xml_files)
print("Alles hat erfolgreich geklappt!")
print("Der Output ist in dem Ordner: 'output_data' in entsprechenden Bildern und .csv-Dateien")
if __name__ == '__main__':
main() | ers_for_os_links[potential_match[1]] = 1
double_l | conditional_block |
main.py | import spacy
import get_input as gi
import csv
import os
import visualisierung as vi
from xml.etree import ElementTree
from matplotlib import pyplot as plt
from collections import Counter
spacy.cli.download("en_core_web_sm")
#Die obige Zeile downloaded "en_core_web_sm", nachdem man das gedownloaded hat, kann man es auskommentieren
nlp = spacy.load("en_core_web_sm")
def create_dir_for_saving_data(new_directory_name):
#Wir erstellen einen Ordner und geben dem User an, dass dieser erstellt wird
show_warnings = 1
show_info = 1
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
if show_info == 1:
print("Wir versuchen den Ordner '"+str(new_directory_name)+"' zu erstellen um dort den Output von diesem Programm zu speichern")
try:
os.mkdir(path)
print("Wir haben einen neuen Order mit dem Pfad:",path,"erstellt.")
except FileExistsError:
if show_warnings == 1:
print("Der Ordner '"+str(new_directory_name)+"' existiert bereits, wir werden ihn nicht erneut erstellen.")
print("Wir fahren wie geplant fort.")
except:
print("Ein unvorhergesehener Fehler ist aufgetreten.")
print("GGF. stimmt etwas mit dem Pfad für die Ordnererstellung nicht.")
def write_into_csv_file(new_directory_name, output_csv_filename, all_texts, output_count_pos_tags):
#Wir versehen jedes Wort in jeder der .xml Dateien mit dem von Spacy erkannten Part-of-Speech Tag
#Die Satzenden finden wir mit "doc.sents" und geben wir an den entsprechenden Stellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
| plt.savefig('Verteilung_der_satzlaenge.png', dpi=300, bbox_inches='tight')
#plt.show()
def do_part_2_2_vorverarbeitung(all_texts):
#Eine kleine Sub-funktione, welche den Output-Ordner erstellt und
#dafür sorgt, dass das die PoS-Tags gezählt und geschrieben werden und dabei
#zählen wir die Satzlängen und geben diese in einem dict zurück
create_dir_for_saving_data("output_data")
dict_with_sentence_lengths = write_into_csv_file("output_data", "output_text_with_pos", all_texts, "output_count_pos_tags")
return dict_with_sentence_lengths
def main():
input_data = gi.get_input_data()
all_texts = input_data[0]
dict_with_sentence_lengths = do_part_2_2_vorverarbeitung(all_texts)
abs_paths_to_xml_files = input_data[1]
#count the different tags for 2.3.b)
get_location_amount = count_locations(all_texts)
xml_tags = get_tags_from_xml(abs_paths_to_xml_files)
write_counted_tags_and_loc_into_csv(xml_tags[0], get_location_amount)
write_counted_qslink_types_into_csv(xml_tags[1])
write_counted_qslink_and_oslink_praep_word_triggers_into_csv(xml_tags[2])
write_counted_motion_verb_into_csv(xml_tags[3])
create_graph_for_sentence_lengths(dict_with_sentence_lengths)
vi.visualisierung_fuer_nummer_vier(abs_paths_to_xml_files)
print("Alles hat erfolgreich geklappt!")
print("Der Output ist in dem Ordner: 'output_data' in entsprechenden Bildern und .csv-Dateien")
if __name__ == '__main__':
main() | plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit") | random_line_split |
main.py | import spacy
import get_input as gi
import csv
import os
import visualisierung as vi
from xml.etree import ElementTree
from matplotlib import pyplot as plt
from collections import Counter
spacy.cli.download("en_core_web_sm")
#Die obige Zeile downloaded "en_core_web_sm", nachdem man das gedownloaded hat, kann man es auskommentieren
nlp = spacy.load("en_core_web_sm")
def create_dir_for_saving_data(new_directory_name):
#Wir erstellen einen Ordner und geben dem User an, dass dieser erstellt wird
show_warnings = 1
show_info = 1
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
if show_info == 1:
print("Wir versuchen den Ordner '"+str(new_directory_name)+"' zu erstellen um dort den Output von diesem Programm zu speichern")
try:
os.mkdir(path)
print("Wir haben einen neuen Order mit dem Pfad:",path,"erstellt.")
except FileExistsError:
if show_warnings == 1:
print("Der Ordner '"+str(new_directory_name)+"' existiert bereits, wir werden ihn nicht erneut erstellen.")
print("Wir fahren wie geplant fort.")
except:
print("Ein unvorhergesehener Fehler ist aufgetreten.")
print("GGF. stimmt etwas mit dem Pfad für die Ordnererstellung nicht.")
def write_into_csv_file(new_directory_name, output_csv_filename, all_texts, output_count_pos_tags):
#Wir versehen jedes Wort in jeder der .xml Dateien mit dem von Spacy erkannten Part-of-Speech Tag
#Die Satzenden finden wir mit "doc.sents" und geben wir an den entsprechenden Stellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit")
plt.savefig('Verteilung_der_satzlaenge.png', dpi=300, bbox_inches='tight')
#plt.show()
def do_part_2_2_vorverarbeitung(all_texts):
#Eine kleine Sub-funktione, welche den Output-Ordner erstellt und
#dafür sorgt, dass das die PoS-Tags gezählt und geschrieben werden und dabei
#zählen wir die Satzlängen und geben diese in einem dict zurück
create_dir_for_saving_data("output_data")
dict_with_sentence_lengths = write_into_csv_file("output_data", "output_text_with_pos", all_texts, "output_count_pos_tags")
return dict_with_sentence_lengths
def main():
input_data = gi.get | ut_data()
all_texts = input_data[0]
dict_with_sentence_lengths = do_part_2_2_vorverarbeitung(all_texts)
abs_paths_to_xml_files = input_data[1]
#count the different tags for 2.3.b)
get_location_amount = count_locations(all_texts)
xml_tags = get_tags_from_xml(abs_paths_to_xml_files)
write_counted_tags_and_loc_into_csv(xml_tags[0], get_location_amount)
write_counted_qslink_types_into_csv(xml_tags[1])
write_counted_qslink_and_oslink_praep_word_triggers_into_csv(xml_tags[2])
write_counted_motion_verb_into_csv(xml_tags[3])
create_graph_for_sentence_lengths(dict_with_sentence_lengths)
vi.visualisierung_fuer_nummer_vier(abs_paths_to_xml_files)
print("Alles hat erfolgreich geklappt!")
print("Der Output ist in dem Ordner: 'output_data' in entsprechenden Bildern und .csv-Dateien")
if __name__ == '__main__':
main() | _inp | identifier_name |
main.py | import spacy
import get_input as gi
import csv
import os
import visualisierung as vi
from xml.etree import ElementTree
from matplotlib import pyplot as plt
from collections import Counter
spacy.cli.download("en_core_web_sm")
#Die obige Zeile downloaded "en_core_web_sm", nachdem man das gedownloaded hat, kann man es auskommentieren
nlp = spacy.load("en_core_web_sm")
def create_dir_for_saving_data(new_directory_name):
#Wir erstellen einen Ordner und geben dem User an, dass dieser erstellt wird
show_warnings = 1
show_info = 1
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
if show_info == 1:
print("Wir versuchen den Ordner '"+str(new_directory_name)+"' zu erstellen um dort den Output von diesem Programm zu speichern")
try:
os.mkdir(path)
print("Wir haben einen neuen Order mit dem Pfad:",path,"erstellt.")
except FileExistsError:
if show_warnings == 1:
print("Der Ordner '"+str(new_directory_name)+"' existiert bereits, wir werden ihn nicht erneut erstellen.")
print("Wir fahren wie geplant fort.")
except:
print("Ein unvorhergesehener Fehler ist aufgetreten.")
print("GGF. stimmt etwas mit dem Pfad für die Ordnererstellung nicht.")
def write_into_csv_file(new_directory_name, output_csv_filename, all_texts, output_count_pos_tags):
#Wir versehen jedes Wort in jeder der .xml Dateien mit dem von Spacy erkannten Part-of-Speech Tag
#Die Satzenden finden wir mit "doc.sents" und geben wir an den entsprechenden Stellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_co | motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit")
plt.savefig('Verteilung_der_satzlaenge.png', dpi=300, bbox_inches='tight')
#plt.show()
def do_part_2_2_vorverarbeitung(all_texts):
#Eine kleine Sub-funktione, welche den Output-Ordner erstellt und
#dafür sorgt, dass das die PoS-Tags gezählt und geschrieben werden und dabei
#zählen wir die Satzlängen und geben diese in einem dict zurück
create_dir_for_saving_data("output_data")
dict_with_sentence_lengths = write_into_csv_file("output_data", "output_text_with_pos", all_texts, "output_count_pos_tags")
return dict_with_sentence_lengths
def main():
input_data = gi.get_input_data()
all_texts = input_data[0]
dict_with_sentence_lengths = do_part_2_2_vorverarbeitung(all_texts)
abs_paths_to_xml_files = input_data[1]
#count the different tags for 2.3.b)
get_location_amount = count_locations(all_texts)
xml_tags = get_tags_from_xml(abs_paths_to_xml_files)
write_counted_tags_and_loc_into_csv(xml_tags[0], get_location_amount)
write_counted_qslink_types_into_csv(xml_tags[1])
write_counted_qslink_and_oslink_praep_word_triggers_into_csv(xml_tags[2])
write_counted_motion_verb_into_csv(xml_tags[3])
create_graph_for_sentence_lengths(dict_with_sentence_lengths)
vi.visualisierung_fuer_nummer_vier(abs_paths_to_xml_files)
print("Alles hat erfolgreich geklappt!")
print("Der Output ist in dem Ordner: 'output_data' in entsprechenden Bildern und .csv-Dateien")
if __name__ == '__main__':
main() | unted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_ | identifier_body |
conn.go | package rudp
import (
"errors"
"io"
"math/rand"
"net"
"time"
)
const (
maxWaitSegmentCntWhileConn = 3
)
var rander = rand.New(rand.NewSource(time.Now().Unix()))
type connStatus int8
const (
connStatusConnecting connStatus = iota
connStatusOpen
connStatusClose
connStatusErr
)
type connType int8
const (
connTypeServer connType = iota
connTypeClient
)
var (
rudpConnClosedErr = errors.New("the rudp connection closed")
resolveRUDPSegmentErr = errors.New("rudp message segment resolved error")
)
// RUDPConn is the reliable conn base on udp
type RUDPConn struct {
sendPacketChannel chan *packet // all kinds of packet already to send
resendPacketQueue *packetList // packet from sendPacketQueue that waiting for the peer's ack segment
recvPacketChannel chan *packet // all kinds of packets recv: ack/conn/fin/etc....
outputPacketQueue *packetList // the data for application layer
outputDataTmpBuffer []byte // temporary save the surplus data for aplication layer that would be read next time
rawUDPDataChan chan []byte // only server conn need to
localAddr *net.UDPAddr
remoteAddr *net.UDPAddr
rawUDPConn *net.UDPConn
sendSeqNumber uint32
lastRecvTs int64 // last recv data unix timestamp
lastSendTs int64
maxHasReadSeqNumber uint32 // the max number that the application has read
sendTickNano int32
sendTickModifyEvent chan int32
heartBeatCycleMinute int8
closeConnCallback func() // execute when connection closed
buildConnCallbackListener func() // execute when connection build
closeConnCallbackListener func()
rudpConnStatus connStatus
rudpConnType connType
// react by errBus
sendStop chan error
resendStop chan error
recvStop chan error
packetHandlerStop chan error
heartbeatStop chan error
errBus chan error
err error
}
// DialRUDP client dial server, building a relieable connection
func DialRUDP(localAddr, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient | else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = connStatusConnecting
c.recvPacketChannel = make(chan *packet, 1<<5)
c.sendPacketChannel = make(chan *packet, 1<<5)
c.rawUDPDataChan = make(chan []byte, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
go c.errWatcher()
// net io
go c.send()
go c.recv()
go c.resend()
go c.packetHandler()
return c, nil
}
func (c *RUDPConn) Read(b []byte) (int, error) {
readCnt := len(b)
n := len(b)
if n == 0 {
return 0, nil
}
curWrite := 0
if len(c.outputDataTmpBuffer) != 0 {
if n <= len(c.outputDataTmpBuffer) {
copy(b, c.outputDataTmpBuffer[:n])
c.outputDataTmpBuffer = c.outputDataTmpBuffer[n:]
return readCnt, nil
} else {
n -= len(c.outputDataTmpBuffer)
curWrite += len(c.outputDataTmpBuffer)
copy(b, c.outputDataTmpBuffer)
}
}
for n > 0 {
apacket := c.outputPacketQueue.consume()
if apacket.seqNumber - c.maxHasReadSeqNumber != 1 {
log("发生丢包 cur %d max %d\n", apacket.seqNumber, c.maxHasReadSeqNumber)
}
// apacket.print()
c.maxHasReadSeqNumber = apacket.seqNumber
data := apacket.payload
if n <= len(data) {
copy(b[curWrite:], data[:n])
c.outputDataTmpBuffer = data[n:]
return readCnt, nil
} else {
copy(b[curWrite:], data)
n -= len(data)
curWrite += len(data)
}
}
return 0, nil
}
func (c *RUDPConn) Write(b []byte) (int, error) {
if c.err != nil {
return 0, errors.New("rudp write error: " + c.err.Error())
}
n := len(b)
for {
if len(b) <= rudpPayloadLenLimit {
c.sendPacketChannel <- newNormalPacket(b, c.sendSeqNumber)
c.sendSeqNumber++
return n, nil
} else {
c.sendPacketChannel <- newNormalPacket(b[:rudpPayloadLenLimit], c.sendSeqNumber)
c.sendSeqNumber++
b = b[rudpPayloadLenLimit:]
}
}
}
// Close close must be called while closing the conn
func (c *RUDPConn) Close() error {
if c.rudpConnStatus != connStatusOpen {
return errors.New("the rudp connection is not open status!")
}
defer func() {
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
c.closeConnCallback()
}()
finSegment := newFinPacket().marshal()
n, err := c.write(finSegment)
if err != nil {
return err
}
if n != len(finSegment) {
return errors.New(RawUDPSendNotComplete)
}
c.errBus <- io.EOF
return nil
}
func (c *RUDPConn) resend() {
ticker := time.NewTicker(time.Millisecond * time.Duration(resendDelayThreshholdMS))
defer ticker.Stop()
for {
select {
case <-c.resendStop:
return
case <-ticker.C:
resendPacketList := c.resendPacketQueue.consumePacketSinceNMs(resendDelayThreshholdMS)
if len(resendPacketList) != 0 {
log("一轮重传\n")
}
for _, resendPacket := range resendPacketList {
segment := resendPacket.marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
log("重传 %d\n", resendPacket.seqNumber)
}
}
}
}
func (c *RUDPConn) LocalAddr() net.Addr {
return c.localAddr
}
func (c *RUDPConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *RUDPConn) SetDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetWriteDeadline(t time.Time) error {
return nil
}
| {
c.clientRecv()
} | conditional_block |
conn.go | package rudp
import (
"errors"
"io"
"math/rand"
"net"
"time"
)
const (
maxWaitSegmentCntWhileConn = 3
)
var rander = rand.New(rand.NewSource(time.Now().Unix()))
type connStatus int8
const (
connStatusConnecting connStatus = iota
connStatusOpen
connStatusClose
connStatusErr
)
type connType int8
const (
connTypeServer connType = iota
connTypeClient
)
var (
rudpConnClosedErr = errors.New("the rudp connection closed")
resolveRUDPSegmentErr = errors.New("rudp message segment resolved error")
)
// RUDPConn is the reliable conn base on udp
type RUDPConn struct {
sendPacketChannel chan *packet // all kinds of packet already to send
resendPacketQueue *packetList // packet from sendPacketQueue that waiting for the peer's ack segment
recvPacketChannel chan *packet // all kinds of packets recv: ack/conn/fin/etc....
outputPacketQueue *packetList // the data for application layer
outputDataTmpBuffer []byte // temporary save the surplus data for aplication layer that would be read next time
rawUDPDataChan chan []byte // only server conn need to
localAddr *net.UDPAddr
remoteAddr *net.UDPAddr
rawUDPConn *net.UDPConn
sendSeqNumber uint32
lastRecvTs int64 // last recv data unix timestamp
lastSendTs int64
maxHasReadSeqNumber uint32 // the max number that the application has read
sendTickNano int32
sendTickModifyEvent chan int32
heartBeatCycleMinute int8
closeConnCallback func() // execute when connection closed
buildConnCallbackListener func() // execute when connection build
closeConnCallbackListener func()
rudpConnStatus connStatus
rudpConnType connType
// react by errBus
sendStop chan error
resendStop chan error
recvStop chan error
packetHandlerStop chan error
heartbeatStop chan error
errBus chan error
err error
} | c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = connStatusConnecting
c.recvPacketChannel = make(chan *packet, 1<<5)
c.sendPacketChannel = make(chan *packet, 1<<5)
c.rawUDPDataChan = make(chan []byte, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
go c.errWatcher()
// net io
go c.send()
go c.recv()
go c.resend()
go c.packetHandler()
return c, nil
}
func (c *RUDPConn) Read(b []byte) (int, error) {
readCnt := len(b)
n := len(b)
if n == 0 {
return 0, nil
}
curWrite := 0
if len(c.outputDataTmpBuffer) != 0 {
if n <= len(c.outputDataTmpBuffer) {
copy(b, c.outputDataTmpBuffer[:n])
c.outputDataTmpBuffer = c.outputDataTmpBuffer[n:]
return readCnt, nil
} else {
n -= len(c.outputDataTmpBuffer)
curWrite += len(c.outputDataTmpBuffer)
copy(b, c.outputDataTmpBuffer)
}
}
for n > 0 {
apacket := c.outputPacketQueue.consume()
if apacket.seqNumber - c.maxHasReadSeqNumber != 1 {
log("发生丢包 cur %d max %d\n", apacket.seqNumber, c.maxHasReadSeqNumber)
}
// apacket.print()
c.maxHasReadSeqNumber = apacket.seqNumber
data := apacket.payload
if n <= len(data) {
copy(b[curWrite:], data[:n])
c.outputDataTmpBuffer = data[n:]
return readCnt, nil
} else {
copy(b[curWrite:], data)
n -= len(data)
curWrite += len(data)
}
}
return 0, nil
}
func (c *RUDPConn) Write(b []byte) (int, error) {
if c.err != nil {
return 0, errors.New("rudp write error: " + c.err.Error())
}
n := len(b)
for {
if len(b) <= rudpPayloadLenLimit {
c.sendPacketChannel <- newNormalPacket(b, c.sendSeqNumber)
c.sendSeqNumber++
return n, nil
} else {
c.sendPacketChannel <- newNormalPacket(b[:rudpPayloadLenLimit], c.sendSeqNumber)
c.sendSeqNumber++
b = b[rudpPayloadLenLimit:]
}
}
}
// Close close must be called while closing the conn
func (c *RUDPConn) Close() error {
if c.rudpConnStatus != connStatusOpen {
return errors.New("the rudp connection is not open status!")
}
defer func() {
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
c.closeConnCallback()
}()
finSegment := newFinPacket().marshal()
n, err := c.write(finSegment)
if err != nil {
return err
}
if n != len(finSegment) {
return errors.New(RawUDPSendNotComplete)
}
c.errBus <- io.EOF
return nil
}
func (c *RUDPConn) resend() {
ticker := time.NewTicker(time.Millisecond * time.Duration(resendDelayThreshholdMS))
defer ticker.Stop()
for {
select {
case <-c.resendStop:
return
case <-ticker.C:
resendPacketList := c.resendPacketQueue.consumePacketSinceNMs(resendDelayThreshholdMS)
if len(resendPacketList) != 0 {
log("一轮重传\n")
}
for _, resendPacket := range resendPacketList {
segment := resendPacket.marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
log("重传 %d\n", resendPacket.seqNumber)
}
}
}
}
func (c *RUDPConn) LocalAddr() net.Addr {
return c.localAddr
}
func (c *RUDPConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *RUDPConn) SetDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetWriteDeadline(t time.Time) error {
return nil
} |
// DialRUDP client dial server, building a relieable connection
func DialRUDP(localAddr, remoteAddr *net.UDPAddr) (*RUDPConn, error) { | random_line_split |
conn.go | package rudp
import (
"errors"
"io"
"math/rand"
"net"
"time"
)
const (
maxWaitSegmentCntWhileConn = 3
)
var rander = rand.New(rand.NewSource(time.Now().Unix()))
type connStatus int8
const (
connStatusConnecting connStatus = iota
connStatusOpen
connStatusClose
connStatusErr
)
type connType int8
const (
connTypeServer connType = iota
connTypeClient
)
var (
rudpConnClosedErr = errors.New("the rudp connection closed")
resolveRUDPSegmentErr = errors.New("rudp message segment resolved error")
)
// RUDPConn is the reliable conn base on udp
type RUDPConn struct {
sendPacketChannel chan *packet // all kinds of packet already to send
resendPacketQueue *packetList // packet from sendPacketQueue that waiting for the peer's ack segment
recvPacketChannel chan *packet // all kinds of packets recv: ack/conn/fin/etc....
outputPacketQueue *packetList // the data for application layer
outputDataTmpBuffer []byte // temporary save the surplus data for aplication layer that would be read next time
rawUDPDataChan chan []byte // only server conn need to
localAddr *net.UDPAddr
remoteAddr *net.UDPAddr
rawUDPConn *net.UDPConn
sendSeqNumber uint32
lastRecvTs int64 // last recv data unix timestamp
lastSendTs int64
maxHasReadSeqNumber uint32 // the max number that the application has read
sendTickNano int32
sendTickModifyEvent chan int32
heartBeatCycleMinute int8
closeConnCallback func() // execute when connection closed
buildConnCallbackListener func() // execute when connection build
closeConnCallbackListener func()
rudpConnStatus connStatus
rudpConnType connType
// react by errBus
sendStop chan error
resendStop chan error
recvStop chan error
packetHandlerStop chan error
heartbeatStop chan error
errBus chan error
err error
}
// DialRUDP client dial server, building a relieable connection
func DialRUDP(localAddr, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = connStatusConnecting
c.recvPacketChannel = make(chan *packet, 1<<5)
c.sendPacketChannel = make(chan *packet, 1<<5)
c.rawUDPDataChan = make(chan []byte, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
go c.errWatcher()
// net io
go c.send()
go c.recv()
go c.resend()
go c.packetHandler()
return c, nil
}
func (c *RUDPConn) Read(b []byte) (int, error) | c *RUDPConn) Write(b []byte) (int, error) {
if c.err != nil {
return 0, errors.New("rudp write error: " + c.err.Error())
}
n := len(b)
for {
if len(b) <= rudpPayloadLenLimit {
c.sendPacketChannel <- newNormalPacket(b, c.sendSeqNumber)
c.sendSeqNumber++
return n, nil
} else {
c.sendPacketChannel <- newNormalPacket(b[:rudpPayloadLenLimit], c.sendSeqNumber)
c.sendSeqNumber++
b = b[rudpPayloadLenLimit:]
}
}
}
// Close close must be called while closing the conn
func (c *RUDPConn) Close() error {
if c.rudpConnStatus != connStatusOpen {
return errors.New("the rudp connection is not open status!")
}
defer func() {
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
c.closeConnCallback()
}()
finSegment := newFinPacket().marshal()
n, err := c.write(finSegment)
if err != nil {
return err
}
if n != len(finSegment) {
return errors.New(RawUDPSendNotComplete)
}
c.errBus <- io.EOF
return nil
}
func (c *RUDPConn) resend() {
ticker := time.NewTicker(time.Millisecond * time.Duration(resendDelayThreshholdMS))
defer ticker.Stop()
for {
select {
case <-c.resendStop:
return
case <-ticker.C:
resendPacketList := c.resendPacketQueue.consumePacketSinceNMs(resendDelayThreshholdMS)
if len(resendPacketList) != 0 {
log("一轮重传\n")
}
for _, resendPacket := range resendPacketList {
segment := resendPacket.marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
log("重传 %d\n", resendPacket.seqNumber)
}
}
}
}
func (c *RUDPConn) LocalAddr() net.Addr {
return c.localAddr
}
func (c *RUDPConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *RUDPConn) SetDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetWriteDeadline(t time.Time) error {
return nil
}
| {
readCnt := len(b)
n := len(b)
if n == 0 {
return 0, nil
}
curWrite := 0
if len(c.outputDataTmpBuffer) != 0 {
if n <= len(c.outputDataTmpBuffer) {
copy(b, c.outputDataTmpBuffer[:n])
c.outputDataTmpBuffer = c.outputDataTmpBuffer[n:]
return readCnt, nil
} else {
n -= len(c.outputDataTmpBuffer)
curWrite += len(c.outputDataTmpBuffer)
copy(b, c.outputDataTmpBuffer)
}
}
for n > 0 {
apacket := c.outputPacketQueue.consume()
if apacket.seqNumber - c.maxHasReadSeqNumber != 1 {
log("发生丢包 cur %d max %d\n", apacket.seqNumber, c.maxHasReadSeqNumber)
}
// apacket.print()
c.maxHasReadSeqNumber = apacket.seqNumber
data := apacket.payload
if n <= len(data) {
copy(b[curWrite:], data[:n])
c.outputDataTmpBuffer = data[n:]
return readCnt, nil
} else {
copy(b[curWrite:], data)
n -= len(data)
curWrite += len(data)
}
}
return 0, nil
}
func ( | identifier_body |
conn.go | package rudp
import (
"errors"
"io"
"math/rand"
"net"
"time"
)
const (
maxWaitSegmentCntWhileConn = 3
)
var rander = rand.New(rand.NewSource(time.Now().Unix()))
type connStatus int8
const (
connStatusConnecting connStatus = iota
connStatusOpen
connStatusClose
connStatusErr
)
type connType int8
const (
connTypeServer connType = iota
connTypeClient
)
var (
rudpConnClosedErr = errors.New("the rudp connection closed")
resolveRUDPSegmentErr = errors.New("rudp message segment resolved error")
)
// RUDPConn is the reliable conn base on udp
type RUDPConn struct {
sendPacketChannel chan *packet // all kinds of packet already to send
resendPacketQueue *packetList // packet from sendPacketQueue that waiting for the peer's ack segment
recvPacketChannel chan *packet // all kinds of packets recv: ack/conn/fin/etc....
outputPacketQueue *packetList // the data for application layer
outputDataTmpBuffer []byte // temporary save the surplus data for aplication layer that would be read next time
rawUDPDataChan chan []byte // only server conn need to
localAddr *net.UDPAddr
remoteAddr *net.UDPAddr
rawUDPConn *net.UDPConn
sendSeqNumber uint32
lastRecvTs int64 // last recv data unix timestamp
lastSendTs int64
maxHasReadSeqNumber uint32 // the max number that the application has read
sendTickNano int32
sendTickModifyEvent chan int32
heartBeatCycleMinute int8
closeConnCallback func() // execute when connection closed
buildConnCallbackListener func() // execute when connection build
closeConnCallbackListener func()
rudpConnStatus connStatus
rudpConnType connType
// react by errBus
sendStop chan error
resendStop chan error
recvStop chan error
packetHandlerStop chan error
heartbeatStop chan error
errBus chan error
err error
}
// DialRUDP client dial server, building a relieable connection
func DialRUDP(localAddr, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) | () {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = connStatusConnecting
c.recvPacketChannel = make(chan *packet, 1<<5)
c.sendPacketChannel = make(chan *packet, 1<<5)
c.rawUDPDataChan = make(chan []byte, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
go c.errWatcher()
// net io
go c.send()
go c.recv()
go c.resend()
go c.packetHandler()
return c, nil
}
func (c *RUDPConn) Read(b []byte) (int, error) {
readCnt := len(b)
n := len(b)
if n == 0 {
return 0, nil
}
curWrite := 0
if len(c.outputDataTmpBuffer) != 0 {
if n <= len(c.outputDataTmpBuffer) {
copy(b, c.outputDataTmpBuffer[:n])
c.outputDataTmpBuffer = c.outputDataTmpBuffer[n:]
return readCnt, nil
} else {
n -= len(c.outputDataTmpBuffer)
curWrite += len(c.outputDataTmpBuffer)
copy(b, c.outputDataTmpBuffer)
}
}
for n > 0 {
apacket := c.outputPacketQueue.consume()
if apacket.seqNumber - c.maxHasReadSeqNumber != 1 {
log("发生丢包 cur %d max %d\n", apacket.seqNumber, c.maxHasReadSeqNumber)
}
// apacket.print()
c.maxHasReadSeqNumber = apacket.seqNumber
data := apacket.payload
if n <= len(data) {
copy(b[curWrite:], data[:n])
c.outputDataTmpBuffer = data[n:]
return readCnt, nil
} else {
copy(b[curWrite:], data)
n -= len(data)
curWrite += len(data)
}
}
return 0, nil
}
func (c *RUDPConn) Write(b []byte) (int, error) {
if c.err != nil {
return 0, errors.New("rudp write error: " + c.err.Error())
}
n := len(b)
for {
if len(b) <= rudpPayloadLenLimit {
c.sendPacketChannel <- newNormalPacket(b, c.sendSeqNumber)
c.sendSeqNumber++
return n, nil
} else {
c.sendPacketChannel <- newNormalPacket(b[:rudpPayloadLenLimit], c.sendSeqNumber)
c.sendSeqNumber++
b = b[rudpPayloadLenLimit:]
}
}
}
// Close close must be called while closing the conn
func (c *RUDPConn) Close() error {
if c.rudpConnStatus != connStatusOpen {
return errors.New("the rudp connection is not open status!")
}
defer func() {
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
c.closeConnCallback()
}()
finSegment := newFinPacket().marshal()
n, err := c.write(finSegment)
if err != nil {
return err
}
if n != len(finSegment) {
return errors.New(RawUDPSendNotComplete)
}
c.errBus <- io.EOF
return nil
}
func (c *RUDPConn) resend() {
ticker := time.NewTicker(time.Millisecond * time.Duration(resendDelayThreshholdMS))
defer ticker.Stop()
for {
select {
case <-c.resendStop:
return
case <-ticker.C:
resendPacketList := c.resendPacketQueue.consumePacketSinceNMs(resendDelayThreshholdMS)
if len(resendPacketList) != 0 {
log("一轮重传\n")
}
for _, resendPacket := range resendPacketList {
segment := resendPacket.marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
log("重传 %d\n", resendPacket.seqNumber)
}
}
}
}
func (c *RUDPConn) LocalAddr() net.Addr {
return c.localAddr
}
func (c *RUDPConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
func (c *RUDPConn) SetDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetReadDeadline(t time.Time) error {
return nil
}
func (c *RUDPConn) SetWriteDeadline(t time.Time) error {
return nil
}
| send | identifier_name |
plotXsectLimitComparison.py | #!/usr/bin/env python
import time
import os
import csv
import PlotUtils
import sys
#----------------------------------------------------------------------
# parameters
#----------------------------------------------------------------------
colorsToAvoid = [ 5, # yellow
]
#----------------------------------------------------------------------
def makePlot(csvFnames, relative, includeExpected = True, fermiophobic = None, ymax = None, inputIsAbs = None, drawXsectBR = False,
minMass = None,
maxMass = None,
plotLog = False
):
""" @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames:
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option( | )
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. Files can either be specified by name (which must be the same string as given in the list of non-option arguments) or by position (starting from 1)",
default=[],
action="append",
)
parser.add_option(
"--theo-xsect",
help="add a line for the theoretical cross section times branching ratio",
default=False,
action="store_true",
dest = "theoXsect",
)
parser.add_option(
"--min-mass",
help="do not include values below the given mass",
default=None,
type=float,
dest = "minMass",
)
parser.add_option(
"--max-mass",
help="do not include values above the given mass",
default=None,
type=float,
dest = "maxMass",
)
parser.add_option(
"--legend-xleft",
help="NDC position of left side of legend",
default=0.7,
type=float,
dest = "legendXleft",
)
parser.add_option(
"--legend-xright",
help="NDC position of right side of legend",
default=0.9,
type=float,
dest = "legendXright",
)
parser.add_option(
"--legend-ybottom",
help="NDC position of bottom side of legend",
default=0.7,
type=float,
dest = "legendYbottom",
)
parser.add_option(
"--legend-ytop",
help="NDC position of top side of legend",
default=0.9,
type=float,
dest = "legendYtop",
)
parser.add_option(
"--no-title",
help="disable plotting of the title",
# note the inverted logic here
default = True,
dest = "showTitle",
action = "store_false",
)
parser.add_option(
"--observed-line-width",
help="line width for observed graphs (set to zero to NOT ot show them)",
type = int,
default = 4,
dest = "observedLineWidth",
)
(options, ARGV) = parser.parse_args()
#----------------------------------------
# process command line arguments
#----------------------------------------
if len(ARGV) < 1:
PlotUtils.optError("expected at least one non-option arguments.")
# check whether any files were specified to contain ABSOLUTE cross
# sections
isabs = options.isabs
options.isabs = set()
for value in isabs:
# check whether this is a file name
if value in ARGV:
# arriving here, it's a file name we know
options.isabs.add(value)
continue
# try by position
try:
index = int(value)-1
options.isabs.add(ARGV[index])
continue
except ValueError:
# string did not represent a valid integer
pass
except IndexError:
# out of index in array access
pass
print >> sys.stderr,"--isabs argument '%s' is neither a filename given nor a valid position" % value
sys.exit(1)
#----------------------------------------------------------------------
import ROOT
ROOT.gROOT.SetStyle("Plain")
gcSaver = []
# makePlot(open(ARGV[0]), relative = True)
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.eps")
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.png")
inputFnames = ARGV[:]
makePlot(inputFnames, relative = options.relative, fermiophobic = options.fermiophobicMode, ymax = options.ymax, inputIsAbs = options.isabs,
drawXsectBR = options.theoXsect,
minMass = options.minMass,
maxMass = options.maxMass,
plotLog = options.plotLog,
)
if options.outputFnames:
for fname in options.outputFnames:
print fname
ROOT.gPad.SaveAs(fname) | "--ymax",
type=float,
help="manually sepcify the y scale",
default=None, | random_line_split |
plotXsectLimitComparison.py | #!/usr/bin/env python
import time
import os
import csv
import PlotUtils
import sys
#----------------------------------------------------------------------
# parameters
#----------------------------------------------------------------------
colorsToAvoid = [ 5, # yellow
]
#----------------------------------------------------------------------
def makePlot(csvFnames, relative, includeExpected = True, fermiophobic = None, ymax = None, inputIsAbs = None, drawXsectBR = False,
minMass = None,
maxMass = None,
plotLog = False
):
""" @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames:
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
|
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option(
"--ymax",
type=float,
help="manually sepcify the y scale",
default=None,
)
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. Files can either be specified by name (which must be the same string as given in the list of non-option arguments) or by position (starting from 1)",
default=[],
action="append",
)
parser.add_option(
"--theo-xsect",
help="add a line for the theoretical cross section times branching ratio",
default=False,
action="store_true",
dest = "theoXsect",
)
parser.add_option(
"--min-mass",
help="do not include values below the given mass",
default=None,
type=float,
dest = "minMass",
)
parser.add_option(
"--max-mass",
help="do not include values above the given mass",
default=None,
type=float,
dest = "maxMass",
)
parser.add_option(
"--legend-xleft",
help="NDC position of left side of legend",
default=0.7,
type=float,
dest = "legendXleft",
)
parser.add_option(
"--legend-xright",
help="NDC position of right side of legend",
default=0.9,
type=float,
dest = "legendXright",
)
parser.add_option(
"--legend-ybottom",
help="NDC position of bottom side of legend",
default=0.7,
type=float,
dest = "legendYbottom",
)
parser.add_option(
"--legend-ytop",
help="NDC position of top side of legend",
default=0.9,
type=float,
dest = "legendYtop",
)
parser.add_option(
"--no-title",
help="disable plotting of the title",
# note the inverted logic here
default = True,
dest = "showTitle",
action = "store_false",
)
parser.add_option(
"--observed-line-width",
help="line width for observed graphs (set to zero to NOT ot show them)",
type = int,
default = 4,
dest = "observedLineWidth",
)
(options, ARGV) = parser.parse_args()
#----------------------------------------
# process command line arguments
#----------------------------------------
if len(ARGV) < 1:
PlotUtils.optError("expected at least one non-option arguments.")
# check whether any files were specified to contain ABSOLUTE cross
# sections
isabs = options.isabs
options.isabs = set()
for value in isabs:
# check whether this is a file name
if value in ARGV:
# arriving here, it's a file name we know
options.isabs.add(value)
continue
# try by position
try:
index = int(value)-1
options.isabs.add(ARGV[index])
continue
except ValueError:
# string did not represent a valid integer
pass
except IndexError:
# out of index in array access
pass
print >> sys.stderr,"--isabs argument '%s' is neither a filename given nor a valid position" % value
sys.exit(1)
#----------------------------------------------------------------------
import ROOT
ROOT.gROOT.SetStyle("Plain")
gcSaver = []
# makePlot(open(ARGV[0]), relative = True)
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.eps")
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.png")
inputFnames = ARGV[:]
makePlot(inputFnames, relative = options.relative, fermiophobic = options.fermiophobicMode, ymax = options.ymax, inputIsAbs = options.isabs,
drawXsectBR = options.theoXsect,
minMass = options.minMass,
maxMass = options.maxMass,
plotLog = options.plotLog,
)
if options.outputFnames:
for fname in options.outputFnames:
print fname
ROOT.gPad.SaveAs(fname)
| typeName = "FP" | conditional_block |
plotXsectLimitComparison.py | #!/usr/bin/env python
import time
import os
import csv
import PlotUtils
import sys
#----------------------------------------------------------------------
# parameters
#----------------------------------------------------------------------
colorsToAvoid = [ 5, # yellow
]
#----------------------------------------------------------------------
def makePlot(csvFnames, relative, includeExpected = True, fermiophobic = None, ymax = None, inputIsAbs = None, drawXsectBR = False,
minMass = None,
maxMass = None,
plotLog = False
):
|
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option(
"--ymax",
type=float,
help="manually sepcify the y scale",
default=None,
)
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. Files can either be specified by name (which must be the same string as given in the list of non-option arguments) or by position (starting from 1)",
default=[],
action="append",
)
parser.add_option(
"--theo-xsect",
help="add a line for the theoretical cross section times branching ratio",
default=False,
action="store_true",
dest = "theoXsect",
)
parser.add_option(
"--min-mass",
help="do not include values below the given mass",
default=None,
type=float,
dest = "minMass",
)
parser.add_option(
"--max-mass",
help="do not include values above the given mass",
default=None,
type=float,
dest = "maxMass",
)
parser.add_option(
"--legend-xleft",
help="NDC position of left side of legend",
default=0.7,
type=float,
dest = "legendXleft",
)
parser.add_option(
"--legend-xright",
help="NDC position of right side of legend",
default=0.9,
type=float,
dest = "legendXright",
)
parser.add_option(
"--legend-ybottom",
help="NDC position of bottom side of legend",
default=0.7,
type=float,
dest = "legendYbottom",
)
parser.add_option(
"--legend-ytop",
help="NDC position of top side of legend",
default=0.9,
type=float,
dest = "legendYtop",
)
parser.add_option(
"--no-title",
help="disable plotting of the title",
# note the inverted logic here
default = True,
dest = "showTitle",
action = "store_false",
)
parser.add_option(
"--observed-line-width",
help="line width for observed graphs (set to zero to NOT ot show them)",
type = int,
default = 4,
dest = "observedLineWidth",
)
(options, ARGV) = parser.parse_args()
#----------------------------------------
# process command line arguments
#----------------------------------------
if len(ARGV) < 1:
PlotUtils.optError("expected at least one non-option arguments.")
# check whether any files were specified to contain ABSOLUTE cross
# sections
isabs = options.isabs
options.isabs = set()
for value in isabs:
# check whether this is a file name
if value in ARGV:
# arriving here, it's a file name we know
options.isabs.add(value)
continue
# try by position
try:
index = int(value)-1
options.isabs.add(ARGV[index])
continue
except ValueError:
# string did not represent a valid integer
pass
except IndexError:
# out of index in array access
pass
print >> sys.stderr,"--isabs argument '%s' is neither a filename given nor a valid position" % value
sys.exit(1)
#----------------------------------------------------------------------
import ROOT
ROOT.gROOT.SetStyle("Plain")
gcSaver = []
# makePlot(open(ARGV[0]), relative = True)
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.eps")
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.png")
inputFnames = ARGV[:]
makePlot(inputFnames, relative = options.relative, fermiophobic = options.fermiophobicMode, ymax = options.ymax, inputIsAbs = options.isabs,
drawXsectBR = options.theoXsect,
minMass = options.minMass,
maxMass = options.maxMass,
plotLog = options.plotLog,
)
if options.outputFnames:
for fname in options.outputFnames:
print fname
ROOT.gPad.SaveAs(fname)
| """ @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames:
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified() | identifier_body |
plotXsectLimitComparison.py | #!/usr/bin/env python
import time
import os
import csv
import PlotUtils
import sys
#----------------------------------------------------------------------
# parameters
#----------------------------------------------------------------------
colorsToAvoid = [ 5, # yellow
]
#----------------------------------------------------------------------
def | (csvFnames, relative, includeExpected = True, fermiophobic = None, ymax = None, inputIsAbs = None, drawXsectBR = False,
minMass = None,
maxMass = None,
plotLog = False
):
""" @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames:
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option(
"--ymax",
type=float,
help="manually sepcify the y scale",
default=None,
)
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. Files can either be specified by name (which must be the same string as given in the list of non-option arguments) or by position (starting from 1)",
default=[],
action="append",
)
parser.add_option(
"--theo-xsect",
help="add a line for the theoretical cross section times branching ratio",
default=False,
action="store_true",
dest = "theoXsect",
)
parser.add_option(
"--min-mass",
help="do not include values below the given mass",
default=None,
type=float,
dest = "minMass",
)
parser.add_option(
"--max-mass",
help="do not include values above the given mass",
default=None,
type=float,
dest = "maxMass",
)
parser.add_option(
"--legend-xleft",
help="NDC position of left side of legend",
default=0.7,
type=float,
dest = "legendXleft",
)
parser.add_option(
"--legend-xright",
help="NDC position of right side of legend",
default=0.9,
type=float,
dest = "legendXright",
)
parser.add_option(
"--legend-ybottom",
help="NDC position of bottom side of legend",
default=0.7,
type=float,
dest = "legendYbottom",
)
parser.add_option(
"--legend-ytop",
help="NDC position of top side of legend",
default=0.9,
type=float,
dest = "legendYtop",
)
parser.add_option(
"--no-title",
help="disable plotting of the title",
# note the inverted logic here
default = True,
dest = "showTitle",
action = "store_false",
)
parser.add_option(
"--observed-line-width",
help="line width for observed graphs (set to zero to NOT ot show them)",
type = int,
default = 4,
dest = "observedLineWidth",
)
(options, ARGV) = parser.parse_args()
#----------------------------------------
# process command line arguments
#----------------------------------------
if len(ARGV) < 1:
PlotUtils.optError("expected at least one non-option arguments.")
# check whether any files were specified to contain ABSOLUTE cross
# sections
isabs = options.isabs
options.isabs = set()
for value in isabs:
# check whether this is a file name
if value in ARGV:
# arriving here, it's a file name we know
options.isabs.add(value)
continue
# try by position
try:
index = int(value)-1
options.isabs.add(ARGV[index])
continue
except ValueError:
# string did not represent a valid integer
pass
except IndexError:
# out of index in array access
pass
print >> sys.stderr,"--isabs argument '%s' is neither a filename given nor a valid position" % value
sys.exit(1)
#----------------------------------------------------------------------
import ROOT
ROOT.gROOT.SetStyle("Plain")
gcSaver = []
# makePlot(open(ARGV[0]), relative = True)
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.eps")
# ROOT.gPad.SaveAs("xsect-ratio-exclusion.png")
inputFnames = ARGV[:]
makePlot(inputFnames, relative = options.relative, fermiophobic = options.fermiophobicMode, ymax = options.ymax, inputIsAbs = options.isabs,
drawXsectBR = options.theoXsect,
minMass = options.minMass,
maxMass = options.maxMass,
plotLog = options.plotLog,
)
if options.outputFnames:
for fname in options.outputFnames:
print fname
ROOT.gPad.SaveAs(fname)
| makePlot | identifier_name |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() |
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| {
test::common_tests::async_echo(TestServer::new, TestServer::client)
} | identifier_body |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
/// Builds the TLS server configuration used by the test servers.
///
/// The DER-encoded certificate and private key are compiled into the binary
/// via `include_bytes!`, so tests need no external files on disk.
fn server_config() -> ServerConfig {
    let certificate = Certificate(include_bytes!("tls_cert.der").to_vec());
    let private_key = PrivateKey(include_bytes!("tls_key.der").to_vec());
    let builder = ServerConfig::builder()
        .with_safe_defaults()
        .with_no_client_auth();
    builder
        .with_single_cert(vec![certificate], private_key)
        .expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
    // Shared server state; cloning the `TestServer` hands out another handle
    // to the same underlying runtime and listener.
    data: Arc<TestServerData>,
}
impl test::Server for TestServer {
    // Blocks on `future` using the server's internal runtime and returns its
    // output. Delegates to the shared `TestServerData`.
    fn run_future<F, O>(&self, future: F) -> O
    where
        F: Future<Output = O>,
    {
        self.data.run_future(future)
    }

    // Returns a `Sleep` that fires when the configured request timeout
    // elapses; used by the test client to abort slow requests.
    fn request_expiry(&self) -> Sleep {
        self.data.request_expiry()
    }
}
impl TestServer {
    /// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
    /// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
    /// for each connection.
    ///
    /// Timeout will be set to 10 seconds.
    pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
        TestServer::with_timeout(new_handler, 10)
    }

    /// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
    ///
    /// The server is wrapped with the TLS configuration from `server_config`,
    /// so clients must speak TLS (see `TestConnect`).
    pub fn with_timeout<NH: NewHandler + 'static>(
        new_handler: NH,
        timeout: u64,
    ) -> anyhow::Result<TestServer> {
        let cfg = server_config();
        let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
        Ok(TestServer {
            data: Arc::new(data),
        })
    }

    /// Returns a client connected to the `TestServer`. The transport is handled internally.
    pub fn client(&self) -> TestClient<Self, TestConnect> {
        self.data.client(self)
    }

    /// Spawns the given future on the `TestServer`'s internal runtime.
    /// This allows you to spawn more futures ontop of the `TestServer` in your
    /// tests.
    pub fn spawn<F>(&self, future: F)
    where
        F: Future<Output = ()> + Send + 'static,
    {
        self.data.spawn(future)
    }
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
    // Shared server state; cloning hands out another handle to the same server.
    inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
    /// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
    /// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
    /// for each connection.
    ///
    /// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
    pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
        AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
    }

    /// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
    ///
    /// The server is wrapped with the TLS configuration from `server_config`,
    /// so clients must speak TLS (see `TestConnect`).
    pub async fn new_with_timeout<NH: NewHandler + 'static>(
        new_handler: NH,
        timeout: Duration,
    ) -> anyhow::Result<AsyncTestServer> {
        let cfg = server_config();
        let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
        Ok(AsyncTestServer {
            inner: Arc::new(inner),
        })
    }

    /// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
    /// The transport is handled internally.
    pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
        self.inner.client()
    }
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);

impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
    /// Reports connection metadata to hyper, advertising HTTP/2 when the TLS
    /// session negotiated the `h2` ALPN protocol.
    fn connected(&self) -> Connected {
        let (tcp, tls) = self.0.get_ref();
        let connected = tcp.connected();
        match tls.alpn_protocol() {
            Some(protocol) if protocol == b"h2" => connected.negotiated_h2(),
            _ => connected,
        }
    }
}
// Delegates reads to the wrapped TLS stream via pin projection.
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
    IO: AsyncRead + AsyncWrite + Unpin,
{
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut ReadBuf,
    ) -> Poll<Result<(), io::Error>> {
        self.project().0.poll_read(cx, buf)
    }
}
// Delegates writes, flushes, and shutdown to the wrapped TLS stream via pin
// projection.
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        self.project().0.poll_write(cx, buf)
    }

    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.project().0.poll_flush(cx)
    }

    #[inline]
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        self.project().0.poll_shutdown(cx)
    }
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
    // Address of the test server to connect to.
    pub(crate) addr: SocketAddr,
    // Client-side TLS configuration; built in `From<SocketAddr>` with the
    // bundled test CA certificate as the only trust root.
    pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
    /// Builds a `TestConnect` for `addr` whose TLS client trusts only the
    /// bundled test CA certificate (compiled in via `include_bytes!`).
    fn from(addr: SocketAddr) -> Self {
        let mut roots = RootCertStore::empty();
        roots
            .add(&Certificate(include_bytes!("tls_ca_cert.der").to_vec()))
            .unwrap();
        let config = ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(roots)
            .with_no_client_auth();
        Self {
            addr,
            config: Arc::new(config),
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn | () {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| async_test_server_adds_client_address_to_state | identifier_name |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") | else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
}
| {
tcp.connected().negotiated_h2()
} | conditional_block |
test.rs | //! Contains helpers for Gotham applications to use during testing.
//!
//! See the [`TestServer`] and [`AsyncTestServer`] types for example usage.
use std::convert::TryFrom;
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use futures_util::future::{BoxFuture, FutureExt};
use hyper::client::connect::{Connected, Connection};
use hyper::service::Service;
use hyper::Uri;
use log::info;
use pin_project::pin_project;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::time::Sleep;
use tokio_rustls::client::TlsStream;
use tokio_rustls::rustls::{
self, Certificate, ClientConfig, PrivateKey, RootCertStore, ServerConfig, ServerName,
};
use tokio_rustls::TlsConnector;
use crate::handler::NewHandler;
use crate::test::async_test::{AsyncTestClient, AsyncTestServerInner};
use crate::test::{self, TestClient, TestServerData};
use crate::tls::rustls_wrap;
fn server_config() -> ServerConfig {
let cert = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr; | Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_adds_client_address_to_state() {
async_test::common_tests::adds_client_address_to_state(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
} |
async move {
match TcpStream::connect(address).await { | random_line_split |
databases.go | // Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"strings"
"time"
)
const (
// InformationSchemaDatabaseName is the name of the information schema database.
InformationSchemaDatabaseName = "information_schema"
)
// DatabaseProvider is the fundamental interface to integrate with the engine. It provides access to all databases in
// a given backend. A DatabaseProvider is provided to the Catalog when the engine is initialized.
type DatabaseProvider interface {
// Database gets a Database from the provider.
Database(ctx *Context, name string) (Database, error)
// HasDatabase checks if the Database exists in the provider.
HasDatabase(ctx *Context, name string) bool
// AllDatabases returns a slice of all Databases in the provider.
AllDatabases(ctx *Context) []Database
}
// MutableDatabaseProvider is a DatabaseProvider that can create and drop databases.
type MutableDatabaseProvider interface {
DatabaseProvider
// CreateDatabase creates a database and adds it to the provider's collection.
CreateDatabase(ctx *Context, name string) error
// DropDatabase removes a database from the provider's collection.
DropDatabase(ctx *Context, name string) error
}
// CollatedDatabaseProvider is a DatabaseProvider that can create a Database with a specific collation.
type CollatedDatabaseProvider interface {
MutableDatabaseProvider
// CreateCollatedDatabase creates a collated database and adds it to the provider's collection.
CreateCollatedDatabase(ctx *Context, name string, collation CollationID) error
}
// TableFunctionProvider is an interface that allows custom table functions to be provided. It's usually (but not
// always) implemented by a DatabaseProvider.
type TableFunctionProvider interface {
// TableFunction returns the table function with the name provided, case-insensitive
TableFunction(ctx *Context, name string) (TableFunction, error)
}
// Database represents the database. Its primary job is to provide access to all tables.
type Database interface {
Nameable
// GetTableInsensitive retrieves a table by its case-insensitive name. To be SQL compliant, databases should not
// allow two tables with the same case-insensitive name. Behavior is undefined when two tables have the same
// case-insensitive name.
GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error)
// GetTableNames returns the table names of every table in the database. It does not return the names of temporary
// tables
GetTableNames(ctx *Context) ([]string, error)
}
// Databaser is a node that contains a reference to a database.
type Databaser interface {
// Database the current database.
Database() Database
// WithDatabase returns a new node instance with the database replaced with
// the one given as parameter.
WithDatabase(Database) (Node, error)
}
// Databaseable is a node with a string reference to a database
type Databaseable interface {
Database() string
}
// MultiDatabaser is a node that contains a reference to a database provider. This interface is intended for very
// specific nodes that must resolve databases during execution time rather than during analysis, such as block
// statements where the execution of a nested statement in the block may affect future statements within that same block.
type MultiDatabaser interface {
// DatabaseProvider returns the current DatabaseProvider.
DatabaseProvider() DatabaseProvider
// WithDatabaseProvider returns a new node instance with the database provider replaced with the one given as parameter.
WithDatabaseProvider(DatabaseProvider) (Node, error)
}
// ReadOnlyDatabase is an extension of Database that may declare itself read-only, which will disallow any DDL or DML
// statements from executing.
type ReadOnlyDatabase interface {
Database
// IsReadOnly returns whether this database is read-only.
IsReadOnly() bool
}
// TableCreator is a Database that can create new tables.
type TableCreator interface {
Database
// CreateTable creates the table with the given name and schema.
CreateTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// IndexedTableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) |
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil {
return err
} else if !ok {
return ErrTableNotFound.New(name)
}
cont, err := cb(tbl)
if err != nil {
return err
}
if !cont {
break
}
}
return nil
}
// UnresolvedDatabase is a database which has not been resolved yet.
type UnresolvedDatabase string
var _ Database = UnresolvedDatabase("")
// Name returns the database name.
func (d UnresolvedDatabase) Name() string {
return string(d)
}
// Tables returns the tables in the database.
func (UnresolvedDatabase) Tables() map[string]Table {
return make(map[string]Table)
}
func (UnresolvedDatabase) GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error) {
return nil, false, nil
}
func (UnresolvedDatabase) GetTableNames(ctx *Context) ([]string, error) {
return []string{}, nil
}
| {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
} | identifier_body |
databases.go | // Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"strings"
"time"
)
const (
// InformationSchemaDatabaseName is the name of the information schema database.
InformationSchemaDatabaseName = "information_schema"
)
// DatabaseProvider is the fundamental interface to integrate with the engine. It provides access to all databases in
// a given backend. A DatabaseProvider is provided to the Catalog when the engine is initialized.
type DatabaseProvider interface {
// Database gets a Database from the provider.
Database(ctx *Context, name string) (Database, error)
// HasDatabase checks if the Database exists in the provider.
HasDatabase(ctx *Context, name string) bool
// AllDatabases returns a slice of all Databases in the provider.
AllDatabases(ctx *Context) []Database
}
// MutableDatabaseProvider is a DatabaseProvider that can create and drop databases.
type MutableDatabaseProvider interface {
DatabaseProvider
// CreateDatabase creates a database and adds it to the provider's collection.
CreateDatabase(ctx *Context, name string) error
// DropDatabase removes a database from the provider's collection.
DropDatabase(ctx *Context, name string) error
}
// CollatedDatabaseProvider is a DatabaseProvider that can create a Database with a specific collation.
type CollatedDatabaseProvider interface {
MutableDatabaseProvider
// CreateCollatedDatabase creates a collated database and adds it to the provider's collection.
CreateCollatedDatabase(ctx *Context, name string, collation CollationID) error
}
// TableFunctionProvider is an interface that allows custom table functions to be provided. It's usually (but not
// always) implemented by a DatabaseProvider.
type TableFunctionProvider interface {
// TableFunction returns the table function with the name provided, case-insensitive
TableFunction(ctx *Context, name string) (TableFunction, error)
}
// Database represents the database. Its primary job is to provide access to all tables.
type Database interface {
Nameable
// GetTableInsensitive retrieves a table by its case-insensitive name. To be SQL compliant, databases should not
// allow two tables with the same case-insensitive name. Behavior is undefined when two tables have the same
// case-insensitive name.
GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error)
// GetTableNames returns the table names of every table in the database. It does not return the names of temporary
// tables
GetTableNames(ctx *Context) ([]string, error)
}
// Databaser is a node that contains a reference to a database.
type Databaser interface {
// Database the current database.
Database() Database
// WithDatabase returns a new node instance with the database replaced with
// the one given as parameter.
WithDatabase(Database) (Node, error)
}
// Databaseable is a node with a string reference to a database
type Databaseable interface {
Database() string
}
// MultiDatabaser is a node that contains a reference to a database provider. This interface is intended for very
// specific nodes that must resolve databases during execution time rather than during analysis, such as block
// statements where the execution of a nested statement in the block may affect future statements within that same block.
type MultiDatabaser interface {
// DatabaseProvider returns the current DatabaseProvider.
DatabaseProvider() DatabaseProvider
// WithDatabaseProvider returns a new node instance with the database provider replaced with the one given as parameter.
WithDatabaseProvider(DatabaseProvider) (Node, error)
}
// ReadOnlyDatabase is an extension of Database that may declare itself read-only, which will disallow any DDL or DML
// statements from executing.
type ReadOnlyDatabase interface {
Database
// IsReadOnly returns whether this database is read-only.
IsReadOnly() bool
}
// TableCreator is a Database that can create new tables.
type TableCreator interface {
Database
// CreateTable creates the table with the given name and schema.
CreateTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// IndexedTableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
}
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil | else if !ok {
return ErrTableNotFound.New(name)
}
cont, err := cb(tbl)
if err != nil {
return err
}
if !cont {
break
}
}
return nil
}
// UnresolvedDatabase is a database which has not been resolved yet.
type UnresolvedDatabase string
var _ Database = UnresolvedDatabase("")
// Name returns the database name.
func (d UnresolvedDatabase) Name() string {
return string(d)
}
// Tables returns the tables in the database.
func (UnresolvedDatabase) Tables() map[string]Table {
return make(map[string]Table)
}
func (UnresolvedDatabase) GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error) {
return nil, false, nil
}
func (UnresolvedDatabase) GetTableNames(ctx *Context) ([]string, error) {
return []string{}, nil
}
| {
return err
} | conditional_block |
databases.go | // Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"strings"
"time"
)
const (
// InformationSchemaDatabaseName is the name of the information schema database.
InformationSchemaDatabaseName = "information_schema"
)
// DatabaseProvider is the fundamental interface to integrate with the engine. It provides access to all databases in
// a given backend. A DatabaseProvider is provided to the Catalog when the engine is initialized.
type DatabaseProvider interface {
// Database gets a Database from the provider.
Database(ctx *Context, name string) (Database, error)
// HasDatabase checks if the Database exists in the provider.
HasDatabase(ctx *Context, name string) bool
// AllDatabases returns a slice of all Databases in the provider.
AllDatabases(ctx *Context) []Database
}
// MutableDatabaseProvider is a DatabaseProvider that can create and drop databases.
type MutableDatabaseProvider interface {
DatabaseProvider
// CreateDatabase creates a database and adds it to the provider's collection.
CreateDatabase(ctx *Context, name string) error
// DropDatabase removes a database from the provider's collection.
DropDatabase(ctx *Context, name string) error
}
// CollatedDatabaseProvider is a DatabaseProvider that can create a Database with a specific collation.
type CollatedDatabaseProvider interface {
MutableDatabaseProvider
// CreateCollatedDatabase creates a collated database and adds it to the provider's collection.
CreateCollatedDatabase(ctx *Context, name string, collation CollationID) error
}
// TableFunctionProvider is an interface that allows custom table functions to be provided. It's usually (but not
// always) implemented by a DatabaseProvider.
type TableFunctionProvider interface {
// TableFunction returns the table function with the name provided, case-insensitive
TableFunction(ctx *Context, name string) (TableFunction, error)
}
// Database represents the database. Its primary job is to provide access to all tables.
type Database interface {
Nameable
// GetTableInsensitive retrieves a table by its case-insensitive name. To be SQL compliant, databases should not
// allow two tables with the same case-insensitive name. Behavior is undefined when two tables have the same
// case-insensitive name.
GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error)
// GetTableNames returns the table names of every table in the database. It does not return the names of temporary
// tables
GetTableNames(ctx *Context) ([]string, error)
}
// Databaser is a node that contains a reference to a database.
type Databaser interface {
// Database the current database.
Database() Database
// WithDatabase returns a new node instance with the database replaced with
// the one given as parameter.
WithDatabase(Database) (Node, error)
}
// Databaseable is a node with a string reference to a database
type Databaseable interface {
Database() string
}
// MultiDatabaser is a node that contains a reference to a database provider. This interface is intended for very
// specific nodes that must resolve databases during execution time rather than during analysis, such as block
// statements where the execution of a nested statement in the block may affect future statements within that same block.
type MultiDatabaser interface {
// DatabaseProvider returns the current DatabaseProvider.
DatabaseProvider() DatabaseProvider
// WithDatabaseProvider returns a new node instance with the database provider replaced with the one given as parameter.
WithDatabaseProvider(DatabaseProvider) (Node, error)
}
// ReadOnlyDatabase is an extension of Database that may declare itself read-only, which will disallow any DDL or DML
// statements from executing.
type ReadOnlyDatabase interface {
Database
// IsReadOnly returns whether this database is read-only.
IsReadOnly() bool
}
// TableCreator is a Database that can create new tables.
type TableCreator interface {
Database
// CreateTable creates the table with the given name and schema.
CreateTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// IndexedTableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
}
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil {
return err
} else if !ok {
return ErrTableNotFound.New(name)
}
cont, err := cb(tbl)
if err != nil {
return err
}
if !cont {
break
}
}
return nil
}
// UnresolvedDatabase is a database which has not been resolved yet.
type UnresolvedDatabase string
var _ Database = UnresolvedDatabase("")
// Name returns the database name.
func (d UnresolvedDatabase) Name() string {
return string(d)
}
// Tables returns the tables in the database.
func (UnresolvedDatabase) Tables() map[string]Table {
return make(map[string]Table)
}
func (UnresolvedDatabase) | (ctx *Context, tblName string) (Table, bool, error) {
return nil, false, nil
}
func (UnresolvedDatabase) GetTableNames(ctx *Context) ([]string, error) {
return []string{}, nil
}
| GetTableInsensitive | identifier_name |
databases.go | // Copyright 2022 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"strings"
"time"
)
const (
// InformationSchemaDatabaseName is the name of the information schema database.
InformationSchemaDatabaseName = "information_schema"
)
// DatabaseProvider is the fundamental interface to integrate with the engine. It provides access to all databases in
// a given backend. A DatabaseProvider is provided to the Catalog when the engine is initialized.
type DatabaseProvider interface {
// Database gets a Database from the provider.
Database(ctx *Context, name string) (Database, error)
// HasDatabase checks if the Database exists in the provider.
HasDatabase(ctx *Context, name string) bool
// AllDatabases returns a slice of all Databases in the provider.
AllDatabases(ctx *Context) []Database
}
// MutableDatabaseProvider is a DatabaseProvider that can create and drop databases.
type MutableDatabaseProvider interface {
DatabaseProvider
// CreateDatabase creates a database and adds it to the provider's collection.
CreateDatabase(ctx *Context, name string) error
// DropDatabase removes a database from the provider's collection.
DropDatabase(ctx *Context, name string) error
}
// CollatedDatabaseProvider is a DatabaseProvider that can create a Database with a specific collation.
type CollatedDatabaseProvider interface {
MutableDatabaseProvider
// CreateCollatedDatabase creates a collated database and adds it to the provider's collection.
CreateCollatedDatabase(ctx *Context, name string, collation CollationID) error
}
// TableFunctionProvider is an interface that allows custom table functions to be provided. It's usually (but not
// always) implemented by a DatabaseProvider.
type TableFunctionProvider interface {
// TableFunction returns the table function with the name provided, case-insensitive
TableFunction(ctx *Context, name string) (TableFunction, error)
}
// Database represents the database. Its primary job is to provide access to all tables.
type Database interface {
Nameable
// GetTableInsensitive retrieves a table by its case-insensitive name. To be SQL compliant, databases should not
// allow two tables with the same case-insensitive name. Behavior is undefined when two tables have the same
// case-insensitive name.
GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error)
// GetTableNames returns the table names of every table in the database. It does not return the names of temporary
// tables
GetTableNames(ctx *Context) ([]string, error)
}
// Databaser is a node that contains a reference to a database.
type Databaser interface {
// Database the current database.
Database() Database
// WithDatabase returns a new node instance with the database replaced with
// the one given as parameter.
WithDatabase(Database) (Node, error)
}
// Databaseable is a node with a string reference to a database
type Databaseable interface {
Database() string
}
// MultiDatabaser is a node that contains a reference to a database provider. This interface is intended for very
// specific nodes that must resolve databases during execution time rather than during analysis, such as block
// statements where the execution of a nested statement in the block may affect future statements within that same block.
type MultiDatabaser interface {
// DatabaseProvider returns the current DatabaseProvider.
DatabaseProvider() DatabaseProvider
// WithDatabaseProvider returns a new node instance with the database provider replaced with the one given as parameter.
WithDatabaseProvider(DatabaseProvider) (Node, error)
}
// ReadOnlyDatabase is an extension of Database that may declare itself read-only, which will disallow any DDL or DML
// statements from executing.
type ReadOnlyDatabase interface {
Database
// IsReadOnly returns whether this database is read-only.
IsReadOnly() bool
}
// TableCreator is a Database that can create new tables.
type TableCreator interface {
Database
// CreateTable creates the table with the given name and schema.
CreateTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// IndexedTableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will | GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
}
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil {
return err
} else if !ok {
return ErrTableNotFound.New(name)
}
cont, err := cb(tbl)
if err != nil {
return err
}
if !cont {
break
}
}
return nil
}
// UnresolvedDatabase is a database which has not been resolved yet.
type UnresolvedDatabase string
var _ Database = UnresolvedDatabase("")
// Name returns the database name.
func (d UnresolvedDatabase) Name() string {
return string(d)
}
// Tables returns the tables in the database.
func (UnresolvedDatabase) Tables() map[string]Table {
return make(map[string]Table)
}
func (UnresolvedDatabase) GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error) {
return nil, false, nil
}
func (UnresolvedDatabase) GetTableNames(ctx *Context) ([]string, error) {
return []string{}, nil
} | // handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database. | random_line_split |
funcs.py | from __future__ import print_function
class DataError(Exception):
def __init__(self, string=None):
if string != None:
self.message = string
def __str__(self):
return self.message
def get3DEllipse(t,y,x):
import numpy as np
at=np.arange(-t,t+1)
ax=np.arange(-x,x+1)
ay=np.arange(-y,y+1)
T,Y,X=np.meshgrid(at,ay,ax,indexing='ij')
dd=(X/float(x))**2+(Y/float(y))**2+(T/float(t))**2
return np.where(dd<=1,1,0)
def getQuantiles(slab,percents=None,verbose=True):
'''Find quantiles of a slab
<slab>: ndarray, whose quantiles will be found.
<percents>: float or a list of floats, left percentage(s). Right quantiles
will be found by (1-percentage).
Return <quantiles>: nested list of left and right quantiles for corresponding
percentages.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2018-05-18 12:55:31.
'''
import numpy
if percents is None:
percents=numpy.array([0.001,0.005,0.01,0.025,0.05,0.1])
percents=numpy.array(percents)
if percents.ndim!=1:
raise Exception("<percents> needs to be a 1D array.")
#-------Remove nans and masked values--------
mask=getMissingMask(slab)
slab=numpy.array(slab)
slab=slab[numpy.where(mask==False)]
flatten=slab.flatten()
flatten.sort()
n=len(flatten)
qlidx=(n*percents).astype('int')
qridx=(n*(1-percents)).astype('int')
ql=flatten[qlidx]
qr=flatten[qridx]
quantiles=zip(ql,qr)
if verbose:
for ii,pii in enumerate(percents):
print('# <getQuantiles>: %0.3f left quantile: %f. %0.3f right quantile: %f.'\
%(pii,ql[ii],1-pii,qr[ii]))
return quantiles
#-------Copies selected attributes from source object to dict--
def attribute_obj2dict(source_object,dictionary=None,verbose=False):
'''Copies selected attributes from source object to dict
to <dictionary>.
<source_object>: object from which attributes are copied.
<dictionary>: None or dict. If None, create a new dict to store
the result. If a dict, use attributes from <source_object>
to overwrite or fill the dict.
Update time: 2016-01-18 11:00:55.
'''
if dictionary is None:
dictionary={}
#------------------Attribute list------------------
att_list=['name','id','dataset','source','title','long_name','standard_name',\
'units','syno','end','harms','filename','comments','description']
#-----------------Copy attributes-----------------
for att in att_list:
if hasattr(source_object,att):
dictionary[att]=getattr(source_object,att).strip()
if verbose:
print('\n# <attribute_obj2dict>: %s: %s' %(att, dictionary[att]))
return dictionary
#-------------Copy attributes from dict to target object----------
def attribute_dict2obj(dictionary,target_object,verbose=False):
'''Copies attributes from dictionary to target object.
<dictionary>: dict, contains attributes to copy.
<target_object>: obj, attributes are copied to.
Return <target_object>: target object with new attributes.
Update time: 2016-01-18 11:31:25.
'''
for att in dictionary.keys():
setattr(target_object,att,dictionary[att])
if verbose:
print('\n# <attribute_dict2obj>: Copy attribute: %s = %s' %(att,dictionary[att]))
return target_object
#-------------------Add an extra axis to a data slab -------------
def | (slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: variable to which the axis is to insert.
<newaxis>: axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1-lon2)
numerator=(cos(lat2)*sin(dlon))**2 + \
(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon))**2
numerator=np.sqrt(numerator)
denominator=sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(dlon)
dsigma=np.arctan2(numerator,denominator)
arc=r*dsigma
if verbose:
print('\n# <greatCircle>: <dsigma>:',dsigma)
print('# <greatCircle>: <arc>:', arc)
return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var,index1=-1,index2=-2,verbose=True):
'''Get a slab from a variable
<var>: nd array with dimension >=2.
<index1>,<index2>: str, indices denoting the dimensions from which a slab is to slice.
Return <slab>: the (1st) slab from <var>.
E.g. <var> has dimension (12,1,241,480), getSlab(var) will
return the 1st time point with singleton dimension squeezed.
Update time: 2015-07-14 19:23:42.
'''
import numpy
ndim=numpy.ndim(var)
if ndim<2:
raise DataError('Dimension in <var> is smaller than 2.')
if ndim==2:
return var
slab='dummy'
slicer=['0',]*ndim
slicer[index1]=':'
slicer[index2]=':'
string='slab=var[%s]' %','.join(slicer)
exec(string)
return slab
#-----------------Change latitude axis to south-to-north---------------------------
def increasingLatitude(slab,verbose=False):
'''Changes a slab so that is always has latitude running from
south to north.
<slab>: input transientvariable. Need to have a proper latitude axis.
Return: <slab2>, if latitude axis is reversed, or <slab> otherwise.
If <slab> has a latitude axis, and the latitudes run from north to south, a
copy <slab2> is made with the latitudes reversed, i.e., running from south
to north.
Update time: 2016-01-18 11:58:11.
'''
latax=getAxis('lat',slab)
'''
try:
latax=slab.getLatitude()
except:
raise DataError('Failed to obtain latitude axis from <slab>.')
if latax is None:
raise DataError('Failed to obtain latitude axis from <slab>.')
'''
#-----Reverse latitudes if necessary------------------
if latax[0]>latax[-1]:
if verbose:
print('\n# <increasingLatitude>: Reversing latitude axis.')
slab2=slab(latitude=(latax[-1],latax[0]))
return slab2
else:
if verbose:
print('\n# <increasingLatitude>: Latitude axis correct. Not changing.')
return slab
#----------Delta_Latitude----------------------------
def dLongitude(var,side='c',R=6371000):
'''Return a slab of longitudinal increment (meter) delta_x.
<var>: variable from which latitude axis is obtained;
<side>: 'n': northern boundary of each latitudinal band;
's': southern boundary of each latitudinal band;
'c': central line of latitudinal band;
----- 'n'
/-----\ 'c'
/_______\ 's'
<R>: radius of Earth;
Return <delta_x>, a 2-D slab with grid information copied from\
<var>.
UPDATE: 2014-08-05 11:12:27:
In computing <delta_x>, the longitudinal increment should be taken
from the actual longitude axis (bounds).
Fortunately this is not affecting any previous computations which are all
globally.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#----------Get axes---------------------
var=increasingLatitude(var)
lonax=var.getLongitude()
latax_bounds=latax.getBounds()
lonax_bounds=lonax.getBounds()
lon_increment=numpy.ptp(lonax_bounds,axis=1)*numpy.pi/180.
if side=='n':
lats=latax_bounds.max(axis=1)
elif side=='c':
lats=latax[:]
elif side=='s':
lats=latax_bounds.min(axis=1)
lats=abs(lats)*numpy.pi/180.
delta_x=R*numpy.cos(lats)[:,None]*lon_increment[None,:]
delta_x=MV.where(delta_x<=1e-8,1,delta_x)
delta_x.setAxisList((latax,lonax))
return delta_x
#----------Delta_Longitude----------------------------
def dLatitude(var,R=6371000,verbose=True):
'''Return a slab of latitudinal increment (meter) delta_y.
<var>: variable from which latitude axis is abtained;
<R>: radius of Earth;
Return <delta_y>, a 2-D slab with grid information copied from\
<var>.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#---------Get axes and bounds-------------------
latax_bounds=latax.getBounds()
delta_y=latax_bounds.ptp(axis=1)*numpy.pi/180.*R
#-------Repeat array to get slab---------------
delta_y=MV.repeat(delta_y[:,None],len(lonax),axis=1)
delta_y.setAxisList((latax,lonax))
return delta_y
#------Sort points of 2D coordinates to form a continuous line------------
def getLineFromPoints(points,reverse=False,verbose=True):
'''Sort points of 2D coordinates to form a continuous line.
<points>: Nx2 nd-array, coordinates of (y,x) or (x,y).
<reverse>: bool, if True, reverse the line orientation.
Return <path>: Nx2 nd-array, ordered coordinates.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2017-11-02 15:57:02.
'''
import numpy
def computePathCost(coords,startidx=0):
'''Compute the cost of path starting from a given start point.
<coords>: list of (y,x) or (x,y) coordinates.
<startidx>: int, index from <coords> as starting point.
Return <path>: orientated coordinates based on nearest neighbors.
<cost>: total squared distances following points in <path>.
'''
def distance(P1, P2):
return (P1[0] - P2[0])**2 + (P1[1] - P2[1])**2
import copy
coord_list=copy.copy(coords)
pass_by=coord_list
path=[coord_list[startidx],]
pass_by.remove(coord_list[startidx])
cost=0
while len(pass_by)>0:
nearest=min(pass_by,key=lambda x: distance(path[-1],x))
cost+=distance(nearest,path[-1])
path.append(nearest)
pass_by.remove(nearest)
return path,cost
N=len(points)
coords=[(points[ii][0],points[ii][1]) for ii in range(N)]
paths=[]
cost=numpy.inf
#-----------Loop through starting points-----------
for ii in range(N):
pathii,costii=computePathCost(coords,ii)
paths.append(pathii)
if costii<cost:
cost=costii
path=pathii
path=numpy.array(path)
if reverse:
path=path[::-1]
return path
#--------------------Get contour from a binary mask--------------------
def getBinContour(mask,lons=None,lats=None,return_largest=True):
'''Get contour from a binary mask
<mask>: 2d array, binary mask.
<lons>,<lats>: 1d array, x, y coordinates for <mask>.
Return <cont>: Nx2 array, coordinates of the contour of the largest
continuous region represented by 1s in <mask>.
'''
import matplotlib.pyplot as plt
import numpy
assert numpy.ndim(mask)==2, "<mask> needs to be 2D."
if lons is not None:
assert numpy.ndim(lons)==1, "<lons> needs to be 1D."
assert len(lons)==mask.shape[1], "<lons> doesn't match <mask> shape."
if lats is not None:
assert numpy.ndim(lats)==1, "<lats> needs to be 1D."
assert len(lats)==mask.shape[0], "<lats> doesn't match <mask> shape."
fig,ax=plt.subplots()
if lons is None:
lons=numpy.arange(mask.shape[1])
if lats is None:
lats=numpy.arange(mask.shape[0])
cs=ax.contourf(lons,lats,mask,[0.9,1.1]).collections
conts=cs[0].get_paths()
if return_largest:
conts.sort(key=lambda x:len(x.vertices))
#cont=conts[-1].vertices
cont=conts[-1]
else:
cont=conts
ax.cla()
plt.close(fig)
return cont
#-----------Find index of value in array-----------
def findIndex(x,a):
'''Find index of value in array
<x>: scalar, value to search.
<a>: 1d array.
Return <idx>: int, index in <a> that a[idx] is closest to <x>.
If <idx> is 0 or len(a)-1, and <x> is too far from the
closest value, return None.
'''
import numpy
if not numpy.isscalar(x):
raise Exception("<x> needs to be scalar.")
if numpy.ndim(a)!=1:
raise Exception("<a> needs to be 1d array.")
idx=numpy.argmin(abs(x-a))
if idx==0 and abs(a[0]-x) > abs(a[1]-a[0]):
idx=None
#raise Exception("<x> not in range of <a>.")
if idx==len(a)-1 and abs(x-a[-1]) > abs(a[-1]-a[-2]):
idx=None
#raise Exception("<x> not in range of <a>.")
return idx
def getBearing(lat1,lon1,lat2,lon2):
'''Compute bearing from point 1 to point2
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
Return <theta>: (forward) bearing in degree.
NOTE that the bearing from P1 to P2 is in general not the same as that
from P2 to P1.
'''
import numpy as np
from numpy import sin, cos
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=lon2-lon1
theta=np.arctan2(sin(dlon)*cos(lat2),
cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(dlon))
theta=theta/np.pi*180
theta=(theta+360)%360
return theta
def getCrossTrackDistance(lat1,lon1,lat2,lon2,lat3,lon3,r=None):
'''Compute cross-track distance
<lat1>, <lon1>: scalar float or nd-array, latitudes and longitudes in
degree, start point of the great circle.
<lat2>, <lon2>: scalar float or nd-array, latitudes and longitudes in
degree, end point of the great circle.
<lat3>, <lon3>: scalar float or nd-array, latitudes and longitudes in
degree, a point away from the great circle.
Return <dxt>: great cicle distance between point P3 to the closest point
on great circle that connects P1 and P2.
NOTE that the sign of dxt tells which side of the 3rd point
P3 is on.
See also getCrossTrackPoint(), getAlongTrackDistance().
'''
import numpy as np
from numpy import sin
if r is None:
r=6371000. #m
# get angular distance between P1 and P3
delta13=greatCircle(lat1,lon1,lat3,lon3,r=1.)
# bearing between P1, P3
theta13=getBearing(lat1,lon1,lat3,lon3)*np.pi/180
# bearing between P1, P2
theta12=getBearing(lat1,lon1,lat2,lon2)*np.pi/180
dtheta=np.arcsin(sin(delta13)*sin(theta13-theta12))
dxt=r*dtheta
return dxt
| addExtraAxis | identifier_name |
funcs.py | from __future__ import print_function
class DataError(Exception):
def __init__(self, string=None):
if string != None:
self.message = string
def __str__(self):
return self.message
def get3DEllipse(t,y,x):
import numpy as np
at=np.arange(-t,t+1)
ax=np.arange(-x,x+1)
ay=np.arange(-y,y+1)
T,Y,X=np.meshgrid(at,ay,ax,indexing='ij')
dd=(X/float(x))**2+(Y/float(y))**2+(T/float(t))**2
return np.where(dd<=1,1,0)
def getQuantiles(slab,percents=None,verbose=True):
'''Find quantiles of a slab
<slab>: ndarray, whose quantiles will be found.
<percents>: float or a list of floats, left percentage(s). Right quantiles
will be found by (1-percentage).
Return <quantiles>: nested list of left and right quantiles for corresponding
percentages.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2018-05-18 12:55:31.
'''
import numpy
if percents is None:
percents=numpy.array([0.001,0.005,0.01,0.025,0.05,0.1])
percents=numpy.array(percents)
if percents.ndim!=1:
raise Exception("<percents> needs to be a 1D array.")
#-------Remove nans and masked values--------
mask=getMissingMask(slab)
slab=numpy.array(slab)
slab=slab[numpy.where(mask==False)]
flatten=slab.flatten()
flatten.sort()
n=len(flatten)
qlidx=(n*percents).astype('int')
qridx=(n*(1-percents)).astype('int')
ql=flatten[qlidx]
qr=flatten[qridx]
quantiles=zip(ql,qr)
if verbose:
for ii,pii in enumerate(percents): | return quantiles
#-------Copies selected attributes from source object to dict--
def attribute_obj2dict(source_object,dictionary=None,verbose=False):
'''Copies selected attributes from source object to dict
to <dictionary>.
<source_object>: object from which attributes are copied.
<dictionary>: None or dict. If None, create a new dict to store
the result. If a dict, use attributes from <source_object>
to overwrite or fill the dict.
Update time: 2016-01-18 11:00:55.
'''
if dictionary is None:
dictionary={}
#------------------Attribute list------------------
att_list=['name','id','dataset','source','title','long_name','standard_name',\
'units','syno','end','harms','filename','comments','description']
#-----------------Copy attributes-----------------
for att in att_list:
if hasattr(source_object,att):
dictionary[att]=getattr(source_object,att).strip()
if verbose:
print('\n# <attribute_obj2dict>: %s: %s' %(att, dictionary[att]))
return dictionary
#-------------Copy attributes from dict to target object----------
def attribute_dict2obj(dictionary,target_object,verbose=False):
'''Copies attributes from dictionary to target object.
<dictionary>: dict, contains attributes to copy.
<target_object>: obj, attributes are copied to.
Return <target_object>: target object with new attributes.
Update time: 2016-01-18 11:31:25.
'''
for att in dictionary.keys():
setattr(target_object,att,dictionary[att])
if verbose:
print('\n# <attribute_dict2obj>: Copy attribute: %s = %s' %(att,dictionary[att]))
return target_object
#-------------------Add an extra axis to a data slab -------------
def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: variable to which the axis is to insert.
<newaxis>: axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1-lon2)
numerator=(cos(lat2)*sin(dlon))**2 + \
(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon))**2
numerator=np.sqrt(numerator)
denominator=sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(dlon)
dsigma=np.arctan2(numerator,denominator)
arc=r*dsigma
if verbose:
print('\n# <greatCircle>: <dsigma>:',dsigma)
print('# <greatCircle>: <arc>:', arc)
return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var,index1=-1,index2=-2,verbose=True):
'''Get a slab from a variable
<var>: nd array with dimension >=2.
<index1>,<index2>: str, indices denoting the dimensions from which a slab is to slice.
Return <slab>: the (1st) slab from <var>.
E.g. <var> has dimension (12,1,241,480), getSlab(var) will
return the 1st time point with singleton dimension squeezed.
Update time: 2015-07-14 19:23:42.
'''
import numpy
ndim=numpy.ndim(var)
if ndim<2:
raise DataError('Dimension in <var> is smaller than 2.')
if ndim==2:
return var
slab='dummy'
slicer=['0',]*ndim
slicer[index1]=':'
slicer[index2]=':'
string='slab=var[%s]' %','.join(slicer)
exec(string)
return slab
#-----------------Change latitude axis to south-to-north---------------------------
def increasingLatitude(slab,verbose=False):
'''Changes a slab so that is always has latitude running from
south to north.
<slab>: input transientvariable. Need to have a proper latitude axis.
Return: <slab2>, if latitude axis is reversed, or <slab> otherwise.
If <slab> has a latitude axis, and the latitudes run from north to south, a
copy <slab2> is made with the latitudes reversed, i.e., running from south
to north.
Update time: 2016-01-18 11:58:11.
'''
latax=getAxis('lat',slab)
'''
try:
latax=slab.getLatitude()
except:
raise DataError('Failed to obtain latitude axis from <slab>.')
if latax is None:
raise DataError('Failed to obtain latitude axis from <slab>.')
'''
#-----Reverse latitudes if necessary------------------
if latax[0]>latax[-1]:
if verbose:
print('\n# <increasingLatitude>: Reversing latitude axis.')
slab2=slab(latitude=(latax[-1],latax[0]))
return slab2
else:
if verbose:
print('\n# <increasingLatitude>: Latitude axis correct. Not changing.')
return slab
#----------Delta_Latitude----------------------------
def dLongitude(var,side='c',R=6371000):
'''Return a slab of longitudinal increment (meter) delta_x.
<var>: variable from which latitude axis is obtained;
<side>: 'n': northern boundary of each latitudinal band;
's': southern boundary of each latitudinal band;
'c': central line of latitudinal band;
----- 'n'
/-----\ 'c'
/_______\ 's'
<R>: radius of Earth;
Return <delta_x>, a 2-D slab with grid information copied from\
<var>.
UPDATE: 2014-08-05 11:12:27:
In computing <delta_x>, the longitudinal increment should be taken
from the actual longitude axis (bounds).
Fortunately this is not affecting any previous computations which are all
globally.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#----------Get axes---------------------
var=increasingLatitude(var)
lonax=var.getLongitude()
latax_bounds=latax.getBounds()
lonax_bounds=lonax.getBounds()
lon_increment=numpy.ptp(lonax_bounds,axis=1)*numpy.pi/180.
if side=='n':
lats=latax_bounds.max(axis=1)
elif side=='c':
lats=latax[:]
elif side=='s':
lats=latax_bounds.min(axis=1)
lats=abs(lats)*numpy.pi/180.
delta_x=R*numpy.cos(lats)[:,None]*lon_increment[None,:]
delta_x=MV.where(delta_x<=1e-8,1,delta_x)
delta_x.setAxisList((latax,lonax))
return delta_x
#----------Delta_Longitude----------------------------
def dLatitude(var,R=6371000,verbose=True):
'''Return a slab of latitudinal increment (meter) delta_y.
<var>: variable from which latitude axis is abtained;
<R>: radius of Earth;
Return <delta_y>, a 2-D slab with grid information copied from\
<var>.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#---------Get axes and bounds-------------------
latax_bounds=latax.getBounds()
delta_y=latax_bounds.ptp(axis=1)*numpy.pi/180.*R
#-------Repeat array to get slab---------------
delta_y=MV.repeat(delta_y[:,None],len(lonax),axis=1)
delta_y.setAxisList((latax,lonax))
return delta_y
#------Sort points of 2D coordinates to form a continuous line------------
def getLineFromPoints(points,reverse=False,verbose=True):
'''Sort points of 2D coordinates to form a continuous line.
<points>: Nx2 nd-array, coordinates of (y,x) or (x,y).
<reverse>: bool, if True, reverse the line orientation.
Return <path>: Nx2 nd-array, ordered coordinates.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2017-11-02 15:57:02.
'''
import numpy
def computePathCost(coords,startidx=0):
'''Compute the cost of path starting from a given start point.
<coords>: list of (y,x) or (x,y) coordinates.
<startidx>: int, index from <coords> as starting point.
Return <path>: orientated coordinates based on nearest neighbors.
<cost>: total squared distances following points in <path>.
'''
def distance(P1, P2):
return (P1[0] - P2[0])**2 + (P1[1] - P2[1])**2
import copy
coord_list=copy.copy(coords)
pass_by=coord_list
path=[coord_list[startidx],]
pass_by.remove(coord_list[startidx])
cost=0
while len(pass_by)>0:
nearest=min(pass_by,key=lambda x: distance(path[-1],x))
cost+=distance(nearest,path[-1])
path.append(nearest)
pass_by.remove(nearest)
return path,cost
N=len(points)
coords=[(points[ii][0],points[ii][1]) for ii in range(N)]
paths=[]
cost=numpy.inf
#-----------Loop through starting points-----------
for ii in range(N):
pathii,costii=computePathCost(coords,ii)
paths.append(pathii)
if costii<cost:
cost=costii
path=pathii
path=numpy.array(path)
if reverse:
path=path[::-1]
return path
#--------------------Get contour from a binary mask--------------------
def getBinContour(mask,lons=None,lats=None,return_largest=True):
'''Get contour from a binary mask
<mask>: 2d array, binary mask.
<lons>,<lats>: 1d array, x, y coordinates for <mask>.
Return <cont>: Nx2 array, coordinates of the contour of the largest
continuous region represented by 1s in <mask>.
'''
import matplotlib.pyplot as plt
import numpy
assert numpy.ndim(mask)==2, "<mask> needs to be 2D."
if lons is not None:
assert numpy.ndim(lons)==1, "<lons> needs to be 1D."
assert len(lons)==mask.shape[1], "<lons> doesn't match <mask> shape."
if lats is not None:
assert numpy.ndim(lats)==1, "<lats> needs to be 1D."
assert len(lats)==mask.shape[0], "<lats> doesn't match <mask> shape."
fig,ax=plt.subplots()
if lons is None:
lons=numpy.arange(mask.shape[1])
if lats is None:
lats=numpy.arange(mask.shape[0])
cs=ax.contourf(lons,lats,mask,[0.9,1.1]).collections
conts=cs[0].get_paths()
if return_largest:
conts.sort(key=lambda x:len(x.vertices))
#cont=conts[-1].vertices
cont=conts[-1]
else:
cont=conts
ax.cla()
plt.close(fig)
return cont
#-----------Find index of value in array-----------
def findIndex(x, a):
    '''Find index of value in array
    <x>: scalar, value to search.
    <a>: 1d array (assumed length >= 2; the boundary checks index a[1]
         and a[-2]).
    Return <idx>: int, index in <a> such that a[idx] is closest to <x>.
        When the nearest element is an end point and <x> lies farther from
        it than one local grid spacing, return None instead.
    '''
    import numpy
    if not numpy.isscalar(x):
        raise Exception("<x> needs to be scalar.")
    if numpy.ndim(a) != 1:
        raise Exception("<a> needs to be 1d array.")
    # position of the nearest element
    idx = numpy.argmin(abs(x - a))
    # out-of-range at the left end: farther from a[0] than the first spacing
    out_left = idx == 0 and abs(a[0] - x) > abs(a[1] - a[0])
    # same test at the right end
    out_right = idx == len(a) - 1 and abs(x - a[-1]) > abs(a[-1] - a[-2])
    return None if (out_left or out_right) else idx
def getBearing(lat1, lon1, lat2, lon2):
    '''Compute bearing from point 1 to point2
    <lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
                    location 1 and 2.
    <lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
                    location 1 and 2.
    Return <theta>: (forward) bearing in degree, normalized to [0, 360).
    NOTE that the bearing from P1 to P2 is in general not the same as that
    from P2 to P1.
    '''
    import numpy as np
    from numpy import sin, cos
    # degrees -> radians
    lat1, lon1, lat2, lon2 = (np.deg2rad(v) for v in (lat1, lon1, lat2, lon2))
    dlon = lon2 - lon1
    # standard forward-azimuth formula on a sphere
    y = sin(dlon) * cos(lat2)
    x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon)
    # radians -> degrees, then wrap into [0, 360)
    return (np.rad2deg(np.arctan2(y, x)) + 360) % 360
def getCrossTrackDistance(lat1,lon1,lat2,lon2,lat3,lon3,r=None):
'''Compute cross-track distance
<lat1>, <lon1>: scalar float or nd-array, latitudes and longitudes in
degree, start point of the great circle.
<lat2>, <lon2>: scalar float or nd-array, latitudes and longitudes in
degree, end point of the great circle.
<lat3>, <lon3>: scalar float or nd-array, latitudes and longitudes in
degree, a point away from the great circle.
Return <dxt>: great cicle distance between point P3 to the closest point
on great circle that connects P1 and P2.
NOTE that the sign of dxt tells which side of the 3rd point
P3 is on.
See also getCrossTrackPoint(), getAlongTrackDistance().
'''
import numpy as np
from numpy import sin
if r is None:
r=6371000. #m
# get angular distance between P1 and P3
delta13=greatCircle(lat1,lon1,lat3,lon3,r=1.)
# bearing between P1, P3
theta13=getBearing(lat1,lon1,lat3,lon3)*np.pi/180
# bearing between P1, P2
theta12=getBearing(lat1,lon1,lat2,lon2)*np.pi/180
dtheta=np.arcsin(sin(delta13)*sin(theta13-theta12))
dxt=r*dtheta
return dxt | print('# <getQuantiles>: %0.3f left quantile: %f. %0.3f right quantile: %f.'\
%(pii,ql[ii],1-pii,qr[ii]))
| random_line_split |
funcs.py | from __future__ import print_function
class DataError(Exception):
    """Exception raised for data-related errors in this module.

    <string>: optional str, human-readable description of the error.
    """
    def __init__(self, string=None):
        # Keep standard Exception behavior (args, pickling) intact.
        super(DataError, self).__init__(string)
        # BUGFIX: previously ``self.message`` was only set when <string>
        # was given, so ``str(DataError())`` raised AttributeError.
        self.message = string if string is not None else ''
    def __str__(self):
        return self.message
def get3DEllipse(t, y, x):
    '''Build a binary 3d ellipsoid mask.
    <t>, <y>, <x>: int, semi-axis lengths (in grid cells) along the 1st,
        2nd and 3rd dimensions.
    Return: int ndarray of shape (2t+1, 2y+1, 2x+1); 1 inside (or on) the
        ellipsoid centered in the box, 0 outside.
    '''
    import numpy as np
    grid_t, grid_y, grid_x = np.meshgrid(
        np.arange(-t, t + 1),
        np.arange(-y, y + 1),
        np.arange(-x, x + 1),
        indexing='ij')
    # normalized squared radius; <= 1 is inside the ellipsoid
    norm = (grid_x / float(x))**2 + (grid_y / float(y))**2 + \
           (grid_t / float(t))**2
    return np.where(norm <= 1, 1, 0)
def getQuantiles(slab, percents=None, verbose=True):
    '''Find quantiles of a slab
    <slab>: ndarray, whose quantiles will be found.
    <percents>: float or a list of floats, left percentage(s). Right quantiles
                will be found by (1-percentage).
    Return <quantiles>: list of (left, right) quantile pairs for the
                corresponding percentages.
    Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
    Update time: 2018-05-18 12:55:31.
    '''
    import numpy
    if percents is None:
        percents = numpy.array([0.001, 0.005, 0.01, 0.025, 0.05, 0.1])
    # BUGFIX: the docstring allows a scalar float, but a 0-d array would
    # have been rejected by the ndim check below; promote to 1-d first.
    percents = numpy.atleast_1d(numpy.array(percents))
    if percents.ndim != 1:
        raise Exception("<percents> needs to be a 1D array.")
    #-------Remove nans and masked values--------
    mask = getMissingMask(slab)
    slab = numpy.array(slab)
    slab = slab[numpy.where(mask == False)]
    flatten = slab.flatten()
    flatten.sort()
    n = len(flatten)
    # positions of the left/right quantiles in the sorted sample
    qlidx = (n * percents).astype('int')
    qridx = (n * (1 - percents)).astype('int')
    ql = flatten[qlidx]
    qr = flatten[qridx]
    # BUGFIX: zip() is a one-shot iterator in Python 3; materialize it so the
    # caller receives the documented list (reusable, indexable).
    quantiles = list(zip(ql, qr))
    if verbose:
        for ii, pii in enumerate(percents):
            print('# <getQuantiles>: %0.3f left quantile: %f. %0.3f right quantile: %f.'\
                    %(pii, ql[ii], 1-pii, qr[ii]))
    return quantiles
#-------Copies selected attributes from source object to dict--
def attribute_obj2dict(source_object, dictionary=None, verbose=False):
    '''Copies selected attributes from source object to dict.
    <source_object>: object from which attributes are copied.
    <dictionary>: None or dict. If None, create a new dict to store
                  the result. If a dict, use attributes from <source_object>
                  to overwrite or fill the dict.
    Return <dictionary>: dict with the stripped string values of every
                  recognized attribute present on <source_object>.
    Update time: 2016-01-18 11:00:55.
    '''
    if dictionary is None:
        dictionary = {}
    #------------------Attribute list------------------
    # Only these known metadata attributes are transferred.
    recognized = ['name', 'id', 'dataset', 'source', 'title', 'long_name',
                  'standard_name', 'units', 'syno', 'end', 'harms',
                  'filename', 'comments', 'description']
    #-----------------Copy attributes-----------------
    for key in recognized:
        if not hasattr(source_object, key):
            continue
        # values are assumed to be strings; surrounding whitespace dropped
        dictionary[key] = getattr(source_object, key).strip()
        if verbose:
            print('\n# <attribute_obj2dict>: %s: %s' %(key, dictionary[key]))
    return dictionary
#-------------Copy attributes from dict to target object----------
def attribute_dict2obj(dictionary, target_object, verbose=False):
    '''Copies attributes from dictionary to target object.
    <dictionary>: dict, contains attributes to copy.
    <target_object>: obj, attributes are copied to.
    Return <target_object>: the same object, with one attribute set per
        dictionary entry (existing attributes are overwritten).
    Update time: 2016-01-18 11:31:25.
    '''
    for key, value in dictionary.items():
        setattr(target_object, key, value)
        if verbose:
            print('\n# <attribute_dict2obj>: Copy attribute: %s = %s' %(key, value))
    return target_object
#-------------------Add an extra axis to a data slab -------------
def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: cdms transient variable to which the axis is to insert.
<newaxis>: cdms axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>, a new transient variable of typecode 'f' with the extra
axis inserted and metadata attributes carried over.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
# Default: a dummy singleton axis, so the rank still grows by one.
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
# Carry recognized metadata attributes over from the input variable.
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
# NOTE(review): 'id' is already in the copied attribute list; it is
# re-assigned explicitly here, presumably because createVariable does not
# honor it via <attributes> -- confirm against cdms2 behavior.
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>; cdms transient variables or numpy ndarrays.
<axis>: int, index of axis to be concatenated along.
Return <result>: concatenation of <var1> and <var2> along <axis>, with
<var1>'s metadata attributes restored when available.
'''
import MV2 as MV
import numpy
# Record the current axis order; ndarrays have no axis list, so fall back
# to the identity order.
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
# Preserve <var1>'s metadata attributes to re-attach after concatenation.
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
# MV operations below require the concatenation axis to carry a units
# attribute; supply an empty one if missing.
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
# MV.concatenate only joins along axis 0, so temporarily swap <axis> to
# the front, concatenate, then swap back.
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis, ref_var, verbose=True):
    '''Interpret and convert an axis id to index
    <axis>: axis option, integer or string.
    <ref_var>: reference variable providing getAxisIndex() (used only when
        <axis> is a string).
    Return <axis_index>: the index of required axis in <ref_var>.
    E.g. index=interpretAxis('time',ref_var)
         index=0
         index=interpretAxis(1,ref_var)
         index=1
    Update time: 2013-09-23 13:36:53.
    '''
    import sys
    import numpy
    # BUGFIX: the integer branch had no body -- an integer axis index is
    # already an index and is passed through unchanged.
    if isinstance(axis, (int, numpy.integer)):
        return axis
    # interpret string dimension (basestring only evaluated under Python 2)
    elif isinstance(axis, str if sys.version_info[0] >= 3 else basestring):
        axis = axis.lower()
        # map common aliases onto the canonical cdms dimension ids
        if axis in ['time', 'tim', 't']:
            dim_id = 'time'
        elif axis in ['level', 'lev', 'z']:
            dim_id = 'level'
        elif axis in ['latitude', 'lat', 'y']:
            dim_id = 'latitude'
        elif axis in ['longitude', 'long', 'lon', 'x']:
            dim_id = 'longitude'
        else:
            # unrecognized alias: try the name as-is
            dim_id = axis
        dim_index = ref_var.getAxisIndex(dim_id)
        if dim_index == -1:
            raise Exception("Required dimension not in <var>.")
        return dim_index
    else:
        raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list, verbose=True):
    '''Check existance of files in a list.
    <file_list>: a list of ABSOLUTE paths to be checked;
    For each missing path, print a warning and wait for the user to press
    Enter. Useful before a long iteration to make sure every data file is
    ready on the disk.
    '''
    import os
    import sys
    # pick the interactive prompt function for the running interpreter
    if sys.version_info.major == 3:
        from builtins import input as input  # py2 py3 compatible
    else:
        input = raw_input
    for path in file_list:
        if not os.path.exists(path):
            print('# <checkFiles>: File not found.', path)
            input("Press Enter to continue...")
    return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
    '''Get a binary array denoting missing values (masked or nan).
    <slab>: nd array, possibly contains masked values or nans.
    Return <mask>: nd int array, 1s for missing, 0s otherwise.
    '''
    import numpy
    # cells that are nan
    nan_mask = numpy.where(numpy.isnan(slab), 1, 0)
    # cells that are masked (only masked arrays carry a .mask attribute)
    if hasattr(slab, 'mask'):
        m = slab.mask
        if m.size == 1 and m == False:
            # scalar mask False: nothing is masked
            mask_mask = numpy.zeros(slab.shape)
        else:
            mask_mask = numpy.where(m, 1, 0)
    else:
        mask_mask = numpy.zeros(slab.shape)
    # missing = masked OR nan
    return numpy.where(nan_mask + mask_mask > 0, 1, 0)
#-------Retrieve required axis from variable-------
def getAxis(axis, ref_var, verbose=True):
    '''Retrieve the axis object identified by <axis> from <ref_var>.
    <axis>: int index or axis name understood by interpretAxis().
    <ref_var>: variable providing getAxis() (e.g. cdms transient variable).
    Return the axis object; raise Exception when it cannot be obtained.
    '''
    idx = interpretAxis(axis, ref_var)
    try:
        result = ref_var.getAxis(idx)
    except:
        result = None
    if result is None:
        raise Exception("<axis> %s not found in variable." % str(axis))
    return result
def greatCircle(lat1, lon1, lat2, lon2, r=None, verbose=False):
    '''Compute the great circle distance on a sphere
    <lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
                    location 1 and 2.
    <lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
                    location 1 and 2.
    <r>: scalar float, spherical radius; defaults to Earth's mean radius
         in meters.
    Return <arc>: great circle distance on sphere, computed via the
        arctan2 form of the spherical law of cosines (numerically stable
        for all separations):
            arc = r * arctan2(sqrt(A), B)
    For details see wiki page:
    http://en.wikipedia.org/wiki/Great-circle_distance
    Update time: 2014-08-11 20:02:05.
    '''
    import numpy as np
    from numpy import sin, cos
    if r is None:
        r = 6371000.  # m
    # degrees -> radians
    lat1, lon1, lat2, lon2 = (np.deg2rad(v) for v in (lat1, lon1, lat2, lon2))
    dlon = abs(lon1 - lon2)
    # numerator and denominator of the arctan2 form
    a = np.sqrt((cos(lat2) * sin(dlon))**2 +
                (cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon))**2)
    b = sin(lat1) * sin(lat2) + cos(lat1) * cos(lat2) * cos(dlon)
    dsigma = np.arctan2(a, b)
    arc = r * dsigma
    if verbose:
        print('\n# <greatCircle>: <dsigma>:', dsigma)
        print('# <greatCircle>: <arc>:', arc)
    return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var, index1=-1, index2=-2, verbose=True):
    '''Get a slab from a variable
    <var>: nd array with dimension >=2.
    <index1>,<index2>: int, (negative) axis indices of the two dimensions
        kept in full; every other dimension is sliced at its first element.
    Return <slab>: the (1st) slab from <var>.
           E.g. <var> has dimension (12,1,241,480), getSlab(var) will
           return the 1st time point with singleton dimension squeezed.
    Update time: 2015-07-14 19:23:42.
    '''
    import numpy
    ndim = numpy.ndim(var)
    if ndim < 2:
        raise DataError('Dimension in <var> is smaller than 2.')
    if ndim == 2:
        return var
    # BUGFIX: the previous implementation built the indexing statement as a
    # string and ran it through exec(); in Python 3, exec() cannot rebind a
    # function local, so the function always returned the placeholder
    # string 'dummy'. Index directly with a tuple of slice objects instead.
    slicer = [0] * ndim
    slicer[index1] = slice(None)
    slicer[index2] = slice(None)
    return var[tuple(slicer)]
#-----------------Change latitude axis to south-to-north---------------------------
def increasingLatitude(slab,verbose=False):
'''Changes a slab so that it always has latitude running from
south to north.
<slab>: input transientvariable. Need to have a proper latitude axis.
Return: <slab2>, if latitude axis is reversed, or <slab> otherwise.
If <slab> has a latitude axis, and the latitudes run from north to south, a
copy <slab2> is made with the latitudes reversed, i.e., running from south
to north.
Update time: 2016-01-18 11:58:11.
'''
latax=getAxis('lat',slab)
# NOTE(review): the triple-quoted block below is dead code kept as a
# bare string expression (an earlier way of obtaining the latitude axis);
# candidate for removal.
'''
try:
latax=slab.getLatitude()
except:
raise DataError('Failed to obtain latitude axis from <slab>.')
if latax is None:
raise DataError('Failed to obtain latitude axis from <slab>.')
'''
#-----Reverse latitudes if necessary------------------
# Descending first-to-last latitude means north-to-south ordering.
if latax[0]>latax[-1]:
if verbose:
print('\n# <increasingLatitude>: Reversing latitude axis.')
# cdms coordinate selection with (low, high) returns the data reordered
# south-to-north.
slab2=slab(latitude=(latax[-1],latax[0]))
return slab2
else:
if verbose:
print('\n# <increasingLatitude>: Latitude axis correct. Not changing.')
return slab
#----------Delta_Latitude----------------------------
def dLongitude(var,side='c',R=6371000):
'''Return a slab of longitudinal increment (meter) delta_x.
<var>: variable from which latitude axis is obtained;
<side>: 'n': northern boundary of each latitudinal band;
's': southern boundary of each latitudinal band;
'c': central line of latitudinal band;
----- 'n'
/-----\ 'c'
/_______\ 's'
<R>: radius of Earth;
Return <delta_x>, a 2-D slab with grid information copied from\
<var>.
UPDATE: 2014-08-05 11:12:27:
In computing <delta_x>, the longitudinal increment should be taken
from the actual longitude axis (bounds).
Fortunately this is not affecting any previous computations which are all
globally.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#----------Get axes---------------------
# Ensure latitudes run south-to-north before reading the longitude axis.
var=increasingLatitude(var)
lonax=var.getLongitude()
latax_bounds=latax.getBounds()
lonax_bounds=lonax.getBounds()
# longitudinal span of each grid cell, converted to radians
lon_increment=numpy.ptp(lonax_bounds,axis=1)*numpy.pi/180.
# pick which latitude to evaluate cos(lat) at for each band
if side=='n':
lats=latax_bounds.max(axis=1)
elif side=='c':
lats=latax[:]
elif side=='s':
lats=latax_bounds.min(axis=1)
# NOTE(review): an unrecognized <side> leaves <lats> undefined and raises
# NameError below -- consider validating <side> explicitly.
lats=abs(lats)*numpy.pi/180.
# arc length = R * cos(lat) * dlon; the outer product yields the 2-D slab
delta_x=R*numpy.cos(lats)[:,None]*lon_increment[None,:]
# replace near-zero increments (at the poles) with 1, presumably to avoid
# division by zero downstream -- confirm against callers
delta_x=MV.where(delta_x<=1e-8,1,delta_x)
delta_x.setAxisList((latax,lonax))
return delta_x
#----------Delta_Longitude----------------------------
def dLatitude(var,R=6371000,verbose=True):
'''Return a slab of latitudinal increment (meter) delta_y.
<var>: variable from which latitude axis is obtained;
<R>: radius of Earth;
Return <delta_y>, a 2-D slab with grid information copied from\
<var>.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#---------Get axes and bounds-------------------
latax_bounds=latax.getBounds()
# meridional arc length of each latitude band: R * dlat (dlat in radians)
delta_y=latax_bounds.ptp(axis=1)*numpy.pi/180.*R
#-------Repeat array to get slab---------------
# delta_y is constant along longitude; tile it across the lon axis
delta_y=MV.repeat(delta_y[:,None],len(lonax),axis=1)
delta_y.setAxisList((latax,lonax))
return delta_y
#------Sort points of 2D coordinates to form a continuous line------------
def getLineFromPoints(points, reverse=False, verbose=True):
    '''Sort points of 2D coordinates to form a continuous line.
    <points>: Nx2 nd-array, coordinates of (y,x) or (x,y).
    <reverse>: bool, if True, reverse the line orientation.
    Return <path>: Nx2 nd-array, ordered coordinates.
    Tries a greedy nearest-neighbour walk from every possible starting
    point and keeps the ordering with the smallest total squared length.
    Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
    Update time: 2017-11-02 15:57:02.
    '''
    import numpy

    def _dist2(p, q):
        # squared euclidean distance between two 2-D points
        return (p[0] - q[0])**2 + (p[1] - q[1])**2

    def _greedyPath(coords, start):
        '''Greedy nearest-neighbour ordering starting at coords[start].
        Return (ordered list of points, total squared step length).'''
        remaining = list(coords)
        ordered = [remaining.pop(start)]
        total = 0
        while remaining:
            nxt = min(remaining, key=lambda p: _dist2(ordered[-1], p))
            total += _dist2(nxt, ordered[-1])
            ordered.append(nxt)
            remaining.remove(nxt)
        return ordered, total

    pts = [(p[0], p[1]) for p in points]
    best_cost = numpy.inf
    #-----------Loop through starting points-----------
    for start in range(len(pts)):
        candidate, candidate_cost = _greedyPath(pts, start)
        if candidate_cost < best_cost:
            best_cost = candidate_cost
            best_path = candidate
    path = numpy.array(best_path)
    if reverse:
        path = path[::-1]
    return path
#--------------------Get contour from a binary mask--------------------
def getBinContour(mask,lons=None,lats=None,return_largest=True):
'''Get contour from a binary mask
<mask>: 2d array, binary mask.
<lons>,<lats>: 1d array, x, y coordinates for <mask>; default to the
grid indices when omitted.
<return_largest>: bool, if True return only the contour path with the
most vertices; otherwise return the list of all contour paths.
Return <cont>: matplotlib Path object (or list of Path objects) tracing
the region(s) of 1s in <mask>.
'''
import matplotlib.pyplot as plt
import numpy
assert numpy.ndim(mask)==2, "<mask> needs to be 2D."
if lons is not None:
assert numpy.ndim(lons)==1, "<lons> needs to be 1D."
assert len(lons)==mask.shape[1], "<lons> doesn't match <mask> shape."
if lats is not None:
assert numpy.ndim(lats)==1, "<lats> needs to be 1D."
assert len(lats)==mask.shape[0], "<lats> doesn't match <mask> shape."
# Throw-away figure: contouring is delegated to matplotlib, not displayed.
fig,ax=plt.subplots()
if lons is None:
lons=numpy.arange(mask.shape[1])
if lats is None:
lats=numpy.arange(mask.shape[0])
# The band [0.9, 1.1] selects exactly the cells equal to 1.
cs=ax.contourf(lons,lats,mask,[0.9,1.1]).collections
conts=cs[0].get_paths()
if return_largest:
# NOTE(review): "largest" = most vertices, which approximates but does
# not guarantee the largest enclosed area -- confirm intent.
conts.sort(key=lambda x:len(x.vertices))
#cont=conts[-1].vertices
cont=conts[-1]
else:
cont=conts
ax.cla()
plt.close(fig)
return cont
#-----------Find index of value in array-----------
def findIndex(x,a):
'''Find index of value in array
<x>: scalar, value to search.
<a>: 1d array, assumed to have at least 2 elements (the boundary checks
below index a[1] and a[-2]).
Return <idx>: int, index in <a> that a[idx] is closest to <x>.
If <idx> is 0 or len(a)-1, and <x> is too far from the
closest value, return None.
'''
import numpy
if not numpy.isscalar(x):
raise Exception("<x> needs to be scalar.")
if numpy.ndim(a)!=1:
raise Exception("<a> needs to be 1d array.")
# index of the element nearest to <x>
idx=numpy.argmin(abs(x-a))
# Left boundary: if <x> lies farther from a[0] than one local grid
# spacing, treat it as out of range.
if idx==0 and abs(a[0]-x) > abs(a[1]-a[0]):
idx=None
#raise Exception("<x> not in range of <a>.")
# Right boundary: same test at the other end. (None == len(a)-1 is just
# False, so a previous None result survives this comparison.)
if idx==len(a)-1 and abs(x-a[-1]) > abs(a[-1]-a[-2]):
idx=None
#raise Exception("<x> not in range of <a>.")
return idx
def getBearing(lat1,lon1,lat2,lon2):
'''Compute bearing from point 1 to point2
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
Return <theta>: (forward) bearing in degree, normalized to [0, 360).
NOTE that the bearing from P1 to P2 is in general not the same as that
from P2 to P1.
'''
import numpy as np
from numpy import sin, cos
# degrees -> radians
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=lon2-lon1
# standard forward-azimuth formula on a sphere
theta=np.arctan2(sin(dlon)*cos(lat2),
cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(dlon))
# radians -> degrees, then wrap into [0, 360)
theta=theta/np.pi*180
theta=(theta+360)%360
return theta
def getCrossTrackDistance(lat1, lon1, lat2, lon2, lat3, lon3, r=None):
    '''Compute cross-track distance
    <lat1>, <lon1>: scalar float or nd-array, latitudes and longitudes in
                    degree, start point of the great circle.
    <lat2>, <lon2>: scalar float or nd-array, latitudes and longitudes in
                    degree, end point of the great circle.
    <lat3>, <lon3>: scalar float or nd-array, latitudes and longitudes in
                    degree, a point away from the great circle.
    <r>: scalar float, spherical radius; defaults to Earth's mean radius
         in meters.
    Return <dxt>: great cicle distance between point P3 to the closest point
                  on great circle that connects P1 and P2.
                  NOTE that the sign of dxt tells which side of the 3rd point
                  P3 is on.
    See also getCrossTrackPoint(), getAlongTrackDistance().
    '''
    import numpy as np
    from numpy import sin
    if r is None:
        r = 6371000.  # m
    # angular distance between P1 and P3 (unit sphere)
    delta13 = greatCircle(lat1, lon1, lat3, lon3, r=1.)
    # bearings P1->P3 and P1->P2, converted to radians
    theta13 = np.deg2rad(getBearing(lat1, lon1, lat3, lon3))
    theta12 = np.deg2rad(getBearing(lat1, lon1, lat2, lon2))
    # signed cross-track angle, scaled back to a distance
    return r * np.arcsin(sin(delta13) * sin(theta13 - theta12))
| return axis | conditional_block |
funcs.py | from __future__ import print_function
class DataError(Exception):
def __init__(self, string=None):
if string != None:
self.message = string
def __str__(self):
return self.message
def get3DEllipse(t,y,x):
import numpy as np
at=np.arange(-t,t+1)
ax=np.arange(-x,x+1)
ay=np.arange(-y,y+1)
T,Y,X=np.meshgrid(at,ay,ax,indexing='ij')
dd=(X/float(x))**2+(Y/float(y))**2+(T/float(t))**2
return np.where(dd<=1,1,0)
def getQuantiles(slab,percents=None,verbose=True):
'''Find quantiles of a slab
<slab>: ndarray, whose quantiles will be found.
<percents>: float or a list of floats, left percentage(s). Right quantiles
will be found by (1-percentage).
Return <quantiles>: nested list of left and right quantiles for corresponding
percentages.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2018-05-18 12:55:31.
'''
import numpy
if percents is None:
percents=numpy.array([0.001,0.005,0.01,0.025,0.05,0.1])
percents=numpy.array(percents)
if percents.ndim!=1:
raise Exception("<percents> needs to be a 1D array.")
#-------Remove nans and masked values--------
mask=getMissingMask(slab)
slab=numpy.array(slab)
slab=slab[numpy.where(mask==False)]
flatten=slab.flatten()
flatten.sort()
n=len(flatten)
qlidx=(n*percents).astype('int')
qridx=(n*(1-percents)).astype('int')
ql=flatten[qlidx]
qr=flatten[qridx]
quantiles=zip(ql,qr)
if verbose:
for ii,pii in enumerate(percents):
print('# <getQuantiles>: %0.3f left quantile: %f. %0.3f right quantile: %f.'\
%(pii,ql[ii],1-pii,qr[ii]))
return quantiles
#-------Copies selected attributes from source object to dict--
def attribute_obj2dict(source_object,dictionary=None,verbose=False):
'''Copies selected attributes from source object to dict
to <dictionary>.
<source_object>: object from which attributes are copied.
<dictionary>: None or dict. If None, create a new dict to store
the result. If a dict, use attributes from <source_object>
to overwrite or fill the dict.
Update time: 2016-01-18 11:00:55.
'''
if dictionary is None:
dictionary={}
#------------------Attribute list------------------
att_list=['name','id','dataset','source','title','long_name','standard_name',\
'units','syno','end','harms','filename','comments','description']
#-----------------Copy attributes-----------------
for att in att_list:
if hasattr(source_object,att):
dictionary[att]=getattr(source_object,att).strip()
if verbose:
print('\n# <attribute_obj2dict>: %s: %s' %(att, dictionary[att]))
return dictionary
#-------------Copy attributes from dict to target object----------
def attribute_dict2obj(dictionary,target_object,verbose=False):
'''Copies attributes from dictionary to target object.
<dictionary>: dict, contains attributes to copy.
<target_object>: obj, attributes are copied to.
Return <target_object>: target object with new attributes.
Update time: 2016-01-18 11:31:25.
'''
for att in dictionary.keys():
setattr(target_object,att,dictionary[att])
if verbose:
print('\n# <attribute_dict2obj>: Copy attribute: %s = %s' %(att,dictionary[att]))
return target_object
#-------------------Add an extra axis to a data slab -------------
def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: variable to which the axis is to insert.
<newaxis>: axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1, var2, axis=0, verbose=False):
    '''Concatenate 2 variables along axis.
    <var1>,<var2>: Variables to be concatenated, in the order of
                   <var1>, <var2>; cdms transient variables or ndarrays.
    <axis>: int, index of axis to be concatenated along.
    Return <result>: concatenation of <var1> and <var2> along <axis>, with
                     <var1>'s metadata attributes restored when available.
    '''
    # BUGFIX: the function body was missing entirely (only the def line was
    # present); restored from the complete copy of this function earlier in
    # the file.
    import MV2 as MV
    import numpy
    # Record the current axis order; plain ndarrays have no axis list.
    try:
        order = var1.getAxisListIndex()
    except:
        order = numpy.arange(var1.ndim)  # if var1 is np.ndarray
    var1 = MV.array(var1)
    var2 = MV.array(var2)
    # Preserve <var1>'s metadata attributes to re-attach afterwards.
    try:
        attdict = attribute_obj2dict(var1)
        hasatt = True
    except:
        hasatt = False
    # The concatenation axis must carry a units attribute for MV operations.
    if not hasattr(var1.getAxis(axis), 'units'):
        ax = var1.getAxis(axis)
        ax.units = ''
        var1.setAxis(axis, ax)
    if not hasattr(var2.getAxis(axis), 'units'):
        ax = var2.getAxis(axis)
        ax.units = ''
        var2.setAxis(axis, ax)
    if verbose:
        print('# <cat>: Original order:', order)
    if axis != 0:
        # MV.concatenate joins along axis 0 only: swap <axis> to the front,
        # concatenate, then swap back.
        order[axis] = 0
        order[0] = axis
        if verbose:
            print('# <cat>: New order:', order)
        var1 = var1(order=order)
        var2 = var2(order=order)
        result = MV.concatenate((var1, var2))
        result = result(order=order)
    else:
        result = MV.concatenate((var1, var2))
    if hasatt:
        result = attribute_dict2obj(attdict, result)
    return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1-lon2)
numerator=(cos(lat2)*sin(dlon))**2 + \
(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon))**2
numerator=np.sqrt(numerator)
denominator=sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(dlon)
dsigma=np.arctan2(numerator,denominator)
arc=r*dsigma
if verbose:
print('\n# <greatCircle>: <dsigma>:',dsigma)
print('# <greatCircle>: <arc>:', arc)
return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var,index1=-1,index2=-2,verbose=True):
'''Get a slab from a variable
<var>: nd array with dimension >=2.
<index1>,<index2>: str, indices denoting the dimensions from which a slab is to slice.
Return <slab>: the (1st) slab from <var>.
E.g. <var> has dimension (12,1,241,480), getSlab(var) will
return the 1st time point with singleton dimension squeezed.
Update time: 2015-07-14 19:23:42.
'''
import numpy
ndim=numpy.ndim(var)
if ndim<2:
raise DataError('Dimension in <var> is smaller than 2.')
if ndim==2:
return var
slab='dummy'
slicer=['0',]*ndim
slicer[index1]=':'
slicer[index2]=':'
string='slab=var[%s]' %','.join(slicer)
exec(string)
return slab
#-----------------Change latitude axis to south-to-north---------------------------
def increasingLatitude(slab,verbose=False):
'''Changes a slab so that is always has latitude running from
south to north.
<slab>: input transientvariable. Need to have a proper latitude axis.
Return: <slab2>, if latitude axis is reversed, or <slab> otherwise.
If <slab> has a latitude axis, and the latitudes run from north to south, a
copy <slab2> is made with the latitudes reversed, i.e., running from south
to north.
Update time: 2016-01-18 11:58:11.
'''
latax=getAxis('lat',slab)
'''
try:
latax=slab.getLatitude()
except:
raise DataError('Failed to obtain latitude axis from <slab>.')
if latax is None:
raise DataError('Failed to obtain latitude axis from <slab>.')
'''
#-----Reverse latitudes if necessary------------------
if latax[0]>latax[-1]:
if verbose:
print('\n# <increasingLatitude>: Reversing latitude axis.')
slab2=slab(latitude=(latax[-1],latax[0]))
return slab2
else:
if verbose:
print('\n# <increasingLatitude>: Latitude axis correct. Not changing.')
return slab
#----------Delta_Latitude----------------------------
def dLongitude(var,side='c',R=6371000):
'''Return a slab of longitudinal increment (meter) delta_x.
<var>: variable from which latitude axis is obtained;
<side>: 'n': northern boundary of each latitudinal band;
's': southern boundary of each latitudinal band;
'c': central line of latitudinal band;
----- 'n'
/-----\ 'c'
/_______\ 's'
<R>: radius of Earth;
Return <delta_x>, a 2-D slab with grid information copied from\
<var>.
UPDATE: 2014-08-05 11:12:27:
In computing <delta_x>, the longitudinal increment should be taken
from the actual longitude axis (bounds).
Fortunately this is not affecting any previous computations which are all
globally.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#----------Get axes---------------------
var=increasingLatitude(var)
lonax=var.getLongitude()
latax_bounds=latax.getBounds()
lonax_bounds=lonax.getBounds()
lon_increment=numpy.ptp(lonax_bounds,axis=1)*numpy.pi/180.
if side=='n':
lats=latax_bounds.max(axis=1)
elif side=='c':
lats=latax[:]
elif side=='s':
lats=latax_bounds.min(axis=1)
lats=abs(lats)*numpy.pi/180.
delta_x=R*numpy.cos(lats)[:,None]*lon_increment[None,:]
delta_x=MV.where(delta_x<=1e-8,1,delta_x)
delta_x.setAxisList((latax,lonax))
return delta_x
#----------Delta_Longitude----------------------------
def dLatitude(var,R=6371000,verbose=True):
'''Return a slab of latitudinal increment (meter) delta_y.
<var>: variable from which latitude axis is abtained;
<R>: radius of Earth;
Return <delta_y>, a 2-D slab with grid information copied from\
<var>.
'''
import numpy
import MV2 as MV
latax=getAxis('lat',var)
lonax=getAxis('lon',var)
#---------Get axes and bounds-------------------
latax_bounds=latax.getBounds()
delta_y=latax_bounds.ptp(axis=1)*numpy.pi/180.*R
#-------Repeat array to get slab---------------
delta_y=MV.repeat(delta_y[:,None],len(lonax),axis=1)
delta_y.setAxisList((latax,lonax))
return delta_y
#------Sort points of 2D coordinates to form a continuous line------------
def getLineFromPoints(points,reverse=False,verbose=True):
'''Sort points of 2D coordinates to form a continuous line.
<points>: Nx2 nd-array, coordinates of (y,x) or (x,y).
<reverse>: bool, if True, reverse the line orientation.
Return <path>: Nx2 nd-array, ordered coordinates.
Author: guangzhi XU (xugzhi1987@gmail.com; guangzhi.xu@outlook.com)
Update time: 2017-11-02 15:57:02.
'''
import numpy
def computePathCost(coords,startidx=0):
'''Compute the cost of path starting from a given start point.
<coords>: list of (y,x) or (x,y) coordinates.
<startidx>: int, index from <coords> as starting point.
Return <path>: orientated coordinates based on nearest neighbors.
<cost>: total squared distances following points in <path>.
'''
def distance(P1, P2):
return (P1[0] - P2[0])**2 + (P1[1] - P2[1])**2
import copy
coord_list=copy.copy(coords)
pass_by=coord_list
path=[coord_list[startidx],]
pass_by.remove(coord_list[startidx])
cost=0
while len(pass_by)>0:
nearest=min(pass_by,key=lambda x: distance(path[-1],x))
cost+=distance(nearest,path[-1])
path.append(nearest)
pass_by.remove(nearest)
return path,cost
N=len(points)
coords=[(points[ii][0],points[ii][1]) for ii in range(N)]
paths=[]
cost=numpy.inf
#-----------Loop through starting points-----------
for ii in range(N):
pathii,costii=computePathCost(coords,ii)
paths.append(pathii)
if costii<cost:
cost=costii
path=pathii
path=numpy.array(path)
if reverse:
path=path[::-1]
return path
#--------------------Get contour from a binary mask--------------------
def getBinContour(mask,lons=None,lats=None,return_largest=True):
'''Get contour from a binary mask
<mask>: 2d array, binary mask.
<lons>,<lats>: 1d array, x, y coordinates for <mask>.
Return <cont>: Nx2 array, coordinates of the contour of the largest
continuous region represented by 1s in <mask>.
'''
import matplotlib.pyplot as plt
import numpy
assert numpy.ndim(mask)==2, "<mask> needs to be 2D."
if lons is not None:
assert numpy.ndim(lons)==1, "<lons> needs to be 1D."
assert len(lons)==mask.shape[1], "<lons> doesn't match <mask> shape."
if lats is not None:
assert numpy.ndim(lats)==1, "<lats> needs to be 1D."
assert len(lats)==mask.shape[0], "<lats> doesn't match <mask> shape."
fig,ax=plt.subplots()
if lons is None:
lons=numpy.arange(mask.shape[1])
if lats is None:
lats=numpy.arange(mask.shape[0])
cs=ax.contourf(lons,lats,mask,[0.9,1.1]).collections
conts=cs[0].get_paths()
if return_largest:
conts.sort(key=lambda x:len(x.vertices))
#cont=conts[-1].vertices
cont=conts[-1]
else:
cont=conts
ax.cla()
plt.close(fig)
return cont
#-----------Find index of value in array-----------
def findIndex(x,a):
'''Find index of value in array
<x>: scalar, value to search.
<a>: 1d array.
Return <idx>: int, index in <a> that a[idx] is closest to <x>.
If <idx> is 0 or len(a)-1, and <x> is too far from the
closest value, return None.
'''
import numpy
if not numpy.isscalar(x):
raise Exception("<x> needs to be scalar.")
if numpy.ndim(a)!=1:
raise Exception("<a> needs to be 1d array.")
idx=numpy.argmin(abs(x-a))
if idx==0 and abs(a[0]-x) > abs(a[1]-a[0]):
idx=None
#raise Exception("<x> not in range of <a>.")
if idx==len(a)-1 and abs(x-a[-1]) > abs(a[-1]-a[-2]):
idx=None
#raise Exception("<x> not in range of <a>.")
return idx
def getBearing(lat1,lon1,lat2,lon2):
'''Compute bearing from point 1 to point2
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
Return <theta>: (forward) bearing in degree.
NOTE that the bearing from P1 to P2 is in general not the same as that
from P2 to P1.
'''
import numpy as np
from numpy import sin, cos
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=lon2-lon1
theta=np.arctan2(sin(dlon)*cos(lat2),
cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(dlon))
theta=theta/np.pi*180
theta=(theta+360)%360
return theta
def getCrossTrackDistance(lat1,lon1,lat2,lon2,lat3,lon3,r=None):
'''Compute cross-track distance
<lat1>, <lon1>: scalar float or nd-array, latitudes and longitudes in
degree, start point of the great circle.
<lat2>, <lon2>: scalar float or nd-array, latitudes and longitudes in
degree, end point of the great circle.
<lat3>, <lon3>: scalar float or nd-array, latitudes and longitudes in
degree, a point away from the great circle.
Return <dxt>: great cicle distance between point P3 to the closest point
on great circle that connects P1 and P2.
NOTE that the sign of dxt tells which side of the 3rd point
P3 is on.
See also getCrossTrackPoint(), getAlongTrackDistance().
'''
import numpy as np
from numpy import sin
if r is None:
r=6371000. #m
# get angular distance between P1 and P3
delta13=greatCircle(lat1,lon1,lat3,lon3,r=1.)
# bearing between P1, P3
theta13=getBearing(lat1,lon1,lat3,lon3)*np.pi/180
# bearing between P1, P2
theta12=getBearing(lat1,lon1,lat2,lon2)*np.pi/180
dtheta=np.arcsin(sin(delta13)*sin(theta13-theta12))
dxt=r*dtheta
return dxt
| '''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result | identifier_body |
aat_common_test.go | // SPDX-License-Identifier: Unlicense OR BSD-3-Clause
package tables
import (
"reflect"
"strings"
"testing"
td "github.com/go-text/typesetting-utils/opentype"
tu "github.com/go-text/typesetting/opentype/testutils"
)
func | (t *testing.T) {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
// extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
}
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we omit the ‘f’ glyph, which makes AAT lookup format 2
// the most compact encoding;
//
// * at offsets 92..93 (offsets 56..57 in Apple’s document), we’ve
// changed the glyph class of the third segment from 5 to 6, which
// matches the values from the comments to the spec (but not the
// Apple’s hexdump).
morxLigatureData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00DA " + // 12: StructLength=218 (+8=226)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 00CA " + // 24: Subtable[0].StructLength=202 (+24=226)
"80 " + // 28: Subtable[0].CoverageFlags=0x80
"00 00 " + // 29: Subtable[0].Reserved=0
"02 " + // 31: Subtable[0].MorphType=2/LigatureMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
// State table header.
"0000 0007 " + // 36: STXHeader.ClassCount=7
"0000 001C " + // 40: STXHeader.ClassTableOffset=28 (+36=64)
"0000 0040 " + // 44: STXHeader.StateArrayOffset=64 (+36=100)
"0000 0078 " + // 48: STXHeader.EntryTableOffset=120 (+36=156)
"0000 0090 " + // 52: STXHeader.LigActionsOffset=144 (+36=180)
"0000 009C " + // 56: STXHeader.LigComponentsOffset=156 (+36=192)
"0000 00AE " + // 60: STXHeader.LigListOffset=174 (+36=210)
// Glyph class table.
"0002 0006 " + // 64: ClassTable.LookupFormat=2, .UnitSize=6
"0003 000C " + // 68: .NUnits=3, .SearchRange=12
"0001 0006 " + // 72: .EntrySelector=1, .RangeShift=6
"0016 0014 0004 " + // 76: GlyphID 20..22 [a..c] -> GlyphClass 4
"0018 0017 0005 " + // 82: GlyphID 23..24 [d..e] -> GlyphClass 5
"001C 001A 0006 " + // 88: GlyphID 26..28 [g..i] -> GlyphClass 6
"FFFF FFFF 0000 " + // 94: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 0000 " + // 100: State[0][0..6]
"0000 0000 0000 0000 0001 0000 0000 " + // 114: State[1][0..6]
"0000 0000 0000 0000 0001 0002 0000 " + // 128: State[2][0..6]
"0000 0000 0000 0000 0001 0002 0003 " + // 142: State[3][0..6]
// Entry table.
"0000 0000 " + // 156: Entries[0].NewState=0, .Flags=0
"0000 " + // 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
"0002 8000 " + // 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
"0000 " + // 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
"0003 8000 " + // 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
"0000 " + // 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
"0000 A000 " + // 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
"0000 " + // 178: Entries[3].ActionIndex=0 (start at Action[0])
// Ligature actions table.
"3FFF FFE7 " + // 180: Action[0].Flags=0, .GlyphIndexDelta=-25
"3FFF FFED " + // 184: Action[1].Flags=0, .GlyphIndexDelta=-19
"BFFF FFF2 " + // 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
// Ligature component table.
"0000 0001 " + // 192: LigComponent[0]=0, LigComponent[1]=1
"0002 0003 " + // 196: LigComponent[2]=2, LigComponent[3]=3
"0000 0004 " + // 200: LigComponent[4]=0, LigComponent[5]=4
"0000 0008 " + // 204: LigComponent[6]=0, LigComponent[7]=8
"0010 " + // 208: LigComponent[8]=16
// Ligature list.
"03E8 03E9 " + // 210: LigList[0]=1000, LigList[1]=1001
"03EA 03EB " + // 214: LigList[2]=1002, LigList[3]=1003
"03EC 03ED " + // 218: LigList[4]=1004, LigList[3]=1005
"03EE 03EF ") // 222: LigList[5]=1006, LigList[6]=1007
tu.Assert(t, len(morxLigatureData) == 226)
out, _, err := ParseMorx(morxLigatureData, 1515)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0x80
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
lig, ok := subtable.Data.(MorxSubtableLigature)
tu.Assert(t, ok)
machine := lig.AATStateTableExt
tu.Assert(t, machine.StateSize == 7)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 20, LastGlyph: 22, Value: 4},
{FirstGlyph: 23, LastGlyph: 24, Value: 5},
{FirstGlyph: 26, LastGlyph: 28, Value: 6},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[0][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[1][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0000}, // State[2][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0003}, // State[3][0..6]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0},
{NewState: 0x0002, Flags: 0x8000},
{NewState: 0x0003, Flags: 0x8000},
{NewState: 0, Flags: 0xA000},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
expLigActions := []uint32{
0x3FFFFFE7,
0x3FFFFFED,
0xBFFFFFF2,
}
expComponents := []uint16{0, 1, 2, 3, 0, 4, 0, 8, 16}
expLigatures := []GlyphID{
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
}
tu.Assert(t, reflect.DeepEqual(lig.LigActions, expLigActions))
tu.Assert(t, reflect.DeepEqual(lig.Components, expComponents))
tu.Assert(t, reflect.DeepEqual(lig.Ligatures, expLigatures))
}
func TestMorxInsertion(t *testing.T) {
// imported from fonttools
// Taken from the `morx` table of the second font in DevanagariSangamMN.ttc
// on macOS X 10.12.6; manually pruned to just contain the insertion lookup.
morxInsertionData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00A4 " + // 12: StructLength=164 (+8=172)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 0094 " + // 24: Subtable[0].StructLength=148 (+24=172)
"00 " + // 28: Subtable[0].CoverageFlags=0x00
"00 00 " + // 29: Subtable[0].Reserved=0
"05 " + // 31: Subtable[0].MorphType=5/InsertionMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
"0000 0006 " + // 36: STXHeader.ClassCount=6
"0000 0014 " + // 40: STXHeader.ClassTableOffset=20 (+36=56)
"0000 004A " + // 44: STXHeader.StateArrayOffset=74 (+36=110)
"0000 006E " + // 48: STXHeader.EntryTableOffset=110 (+36=146)
"0000 0086 " + // 52: STXHeader.InsertionActionOffset=134 (+36=170)
// Glyph class table.
"0002 0006 " + // 56: ClassTable.LookupFormat=2, .UnitSize=6
"0006 0018 " + // 60: .NUnits=6, .SearchRange=24
"0002 000C " + // 64: .EntrySelector=2, .RangeShift=12
"00AC 00AC 0005 " + // 68: GlyphID 172..172 -> GlyphClass 5
"01EB 01E6 0005 " + // 74: GlyphID 486..491 -> GlyphClass 5
"01F0 01F0 0004 " + // 80: GlyphID 496..496 -> GlyphClass 4
"01F8 01F6 0004 " + // 88: GlyphID 502..504 -> GlyphClass 4
"01FC 01FA 0004 " + // 92: GlyphID 506..508 -> GlyphClass 4
"0250 0250 0005 " + // 98: GlyphID 592..592 -> GlyphClass 5
"FFFF FFFF 0000 " + // 104: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 " + // 110: State[0][0..5]
"0000 0000 0000 0000 0001 0000 " + // 122: State[1][0..5]
"0000 0000 0001 0000 0001 0002 " + // 134: State[2][0..5]
// Entry table.
"0000 0000 " + // 146: Entries[0].NewState=0, .Flags=0
"FFFF " + // 150: Entries[0].CurrentInsertIndex=<None>
"FFFF " + // 152: Entries[0].MarkedInsertIndex=<None>
"0002 0000 " + // 154: Entries[1].NewState=0, .Flags=0
"FFFF " + // 158: Entries[1].CurrentInsertIndex=<None>
"FFFF " + // 160: Entries[1].MarkedInsertIndex=<None>
"0000 " + // 162: Entries[2].NewState=0
"2820 " + // 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
// .CurrentInsertCount=1, .MarkedInsertCount=0
"0000 " + // 166: Entries[1].CurrentInsertIndex=0
"FFFF " + // 168: Entries[1].MarkedInsertIndex=<None>
// Insertion action table.
"022F") // 170: InsertionActionTable[0]=GlyphID 559
tu.Assert(t, len(morxInsertionData) == 172)
out, _, err := ParseMorx(morxInsertionData, 910)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
insert, ok := subtable.Data.(MorxSubtableInsertion)
tu.Assert(t, ok)
machine := insert.AATStateTableExt
tu.Assert(t, machine.StateSize == 6)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 172, LastGlyph: 172, Value: 5},
{FirstGlyph: 486, LastGlyph: 491, Value: 5},
{FirstGlyph: 496, LastGlyph: 496, Value: 4},
{FirstGlyph: 502, LastGlyph: 504, Value: 4},
{FirstGlyph: 506, LastGlyph: 508, Value: 4},
{FirstGlyph: 592, LastGlyph: 592, Value: 5},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 110: State[0][0..5]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 122: State[1][0..5]
{0x0000, 0x0000, 0x0001, 0x0000, 0x0001, 0x0002}, // 134: State[2][0..5]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0x0002, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0, Flags: 0x2820, data: [4]byte{0, 0, 0xff, 0xff}},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
tu.Assert(t, reflect.DeepEqual(insert.Insertions, []GlyphID{0x022f}))
}
func TestParseKerx(t *testing.T) {
for _, filepath := range []string{
"toys/tables/kerx0.bin",
"toys/tables/kerx2.bin",
"toys/tables/kerx2bis.bin",
"toys/tables/kerx24.bin",
"toys/tables/kerx4-1.bin",
"toys/tables/kerx4-2.bin",
"toys/tables/kerx6Exp-VF.bin",
"toys/tables/kerx6-VF.bin",
} {
table, err := td.Files.ReadFile(filepath)
tu.AssertNoErr(t, err)
kerx, _, err := ParseKerx(table, 0xFF)
tu.AssertNoErr(t, err)
tu.Assert(t, len(kerx.Tables) > 0)
for _, subtable := range kerx.Tables {
tu.Assert(t, subtable.TupleCount > 0 == strings.Contains(filepath, "VF"))
switch data := subtable.Data.(type) {
case KerxData0:
tu.Assert(t, len(data.Pairs) > 0)
case KerxData2:
tu.Assert(t, data.Left != nil)
tu.Assert(t, data.Right != nil)
tu.Assert(t, int(data.KerningStart) <= len(data.KerningData))
case KerxData4:
tu.Assert(t, data.Anchors != nil)
}
}
}
}
func TestInvalidFeat(t *testing.T) {
// this is an invalid feat table, comming from a real font table (huh...)
table, err := td.Files.ReadFile("toys/tables/featInvalid.bin")
tu.AssertNoErr(t, err)
_, _, err = ParseFeat(table)
tu.Assert(t, err != nil)
}
func TestParseLtag(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ltag.bin")
tu.AssertNoErr(t, err)
ltag, _, err := ParseLtag(table)
tu.AssertNoErr(t, err)
tu.Assert(t, len(ltag.tagRange) == 1)
tu.Assert(t, ltag.Language(0) == "pl")
}
| TestAATLookup4 | identifier_name |
aat_common_test.go | // SPDX-License-Identifier: Unlicense OR BSD-3-Clause
package tables
import (
"reflect"
"strings"
"testing"
td "github.com/go-text/typesetting-utils/opentype"
tu "github.com/go-text/typesetting/opentype/testutils"
)
func TestAATLookup4(t *testing.T) |
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we omit the ‘f’ glyph, which makes AAT lookup format 2
// the most compact encoding;
//
// * at offsets 92..93 (offsets 56..57 in Apple’s document), we’ve
// changed the glyph class of the third segment from 5 to 6, which
// matches the values from the comments to the spec (but not the
// Apple’s hexdump).
morxLigatureData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00DA " + // 12: StructLength=218 (+8=226)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 00CA " + // 24: Subtable[0].StructLength=202 (+24=226)
"80 " + // 28: Subtable[0].CoverageFlags=0x80
"00 00 " + // 29: Subtable[0].Reserved=0
"02 " + // 31: Subtable[0].MorphType=2/LigatureMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
// State table header.
"0000 0007 " + // 36: STXHeader.ClassCount=7
"0000 001C " + // 40: STXHeader.ClassTableOffset=28 (+36=64)
"0000 0040 " + // 44: STXHeader.StateArrayOffset=64 (+36=100)
"0000 0078 " + // 48: STXHeader.EntryTableOffset=120 (+36=156)
"0000 0090 " + // 52: STXHeader.LigActionsOffset=144 (+36=180)
"0000 009C " + // 56: STXHeader.LigComponentsOffset=156 (+36=192)
"0000 00AE " + // 60: STXHeader.LigListOffset=174 (+36=210)
// Glyph class table.
"0002 0006 " + // 64: ClassTable.LookupFormat=2, .UnitSize=6
"0003 000C " + // 68: .NUnits=3, .SearchRange=12
"0001 0006 " + // 72: .EntrySelector=1, .RangeShift=6
"0016 0014 0004 " + // 76: GlyphID 20..22 [a..c] -> GlyphClass 4
"0018 0017 0005 " + // 82: GlyphID 23..24 [d..e] -> GlyphClass 5
"001C 001A 0006 " + // 88: GlyphID 26..28 [g..i] -> GlyphClass 6
"FFFF FFFF 0000 " + // 94: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 0000 " + // 100: State[0][0..6]
"0000 0000 0000 0000 0001 0000 0000 " + // 114: State[1][0..6]
"0000 0000 0000 0000 0001 0002 0000 " + // 128: State[2][0..6]
"0000 0000 0000 0000 0001 0002 0003 " + // 142: State[3][0..6]
// Entry table.
"0000 0000 " + // 156: Entries[0].NewState=0, .Flags=0
"0000 " + // 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
"0002 8000 " + // 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
"0000 " + // 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
"0003 8000 " + // 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
"0000 " + // 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
"0000 A000 " + // 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
"0000 " + // 178: Entries[3].ActionIndex=0 (start at Action[0])
// Ligature actions table.
"3FFF FFE7 " + // 180: Action[0].Flags=0, .GlyphIndexDelta=-25
"3FFF FFED " + // 184: Action[1].Flags=0, .GlyphIndexDelta=-19
"BFFF FFF2 " + // 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
// Ligature component table.
"0000 0001 " + // 192: LigComponent[0]=0, LigComponent[1]=1
"0002 0003 " + // 196: LigComponent[2]=2, LigComponent[3]=3
"0000 0004 " + // 200: LigComponent[4]=0, LigComponent[5]=4
"0000 0008 " + // 204: LigComponent[6]=0, LigComponent[7]=8
"0010 " + // 208: LigComponent[8]=16
// Ligature list.
"03E8 03E9 " + // 210: LigList[0]=1000, LigList[1]=1001
"03EA 03EB " + // 214: LigList[2]=1002, LigList[3]=1003
"03EC 03ED " + // 218: LigList[4]=1004, LigList[3]=1005
"03EE 03EF ") // 222: LigList[5]=1006, LigList[6]=1007
tu.Assert(t, len(morxLigatureData) == 226)
out, _, err := ParseMorx(morxLigatureData, 1515)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0x80
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
lig, ok := subtable.Data.(MorxSubtableLigature)
tu.Assert(t, ok)
machine := lig.AATStateTableExt
tu.Assert(t, machine.StateSize == 7)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 20, LastGlyph: 22, Value: 4},
{FirstGlyph: 23, LastGlyph: 24, Value: 5},
{FirstGlyph: 26, LastGlyph: 28, Value: 6},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[0][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[1][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0000}, // State[2][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0003}, // State[3][0..6]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0},
{NewState: 0x0002, Flags: 0x8000},
{NewState: 0x0003, Flags: 0x8000},
{NewState: 0, Flags: 0xA000},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
expLigActions := []uint32{
0x3FFFFFE7,
0x3FFFFFED,
0xBFFFFFF2,
}
expComponents := []uint16{0, 1, 2, 3, 0, 4, 0, 8, 16}
expLigatures := []GlyphID{
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
}
tu.Assert(t, reflect.DeepEqual(lig.LigActions, expLigActions))
tu.Assert(t, reflect.DeepEqual(lig.Components, expComponents))
tu.Assert(t, reflect.DeepEqual(lig.Ligatures, expLigatures))
}
func TestMorxInsertion(t *testing.T) {
// imported from fonttools
// Taken from the `morx` table of the second font in DevanagariSangamMN.ttc
// on macOS X 10.12.6; manually pruned to just contain the insertion lookup.
morxInsertionData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00A4 " + // 12: StructLength=164 (+8=172)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 0094 " + // 24: Subtable[0].StructLength=148 (+24=172)
"00 " + // 28: Subtable[0].CoverageFlags=0x00
"00 00 " + // 29: Subtable[0].Reserved=0
"05 " + // 31: Subtable[0].MorphType=5/InsertionMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
"0000 0006 " + // 36: STXHeader.ClassCount=6
"0000 0014 " + // 40: STXHeader.ClassTableOffset=20 (+36=56)
"0000 004A " + // 44: STXHeader.StateArrayOffset=74 (+36=110)
"0000 006E " + // 48: STXHeader.EntryTableOffset=110 (+36=146)
"0000 0086 " + // 52: STXHeader.InsertionActionOffset=134 (+36=170)
// Glyph class table.
"0002 0006 " + // 56: ClassTable.LookupFormat=2, .UnitSize=6
"0006 0018 " + // 60: .NUnits=6, .SearchRange=24
"0002 000C " + // 64: .EntrySelector=2, .RangeShift=12
"00AC 00AC 0005 " + // 68: GlyphID 172..172 -> GlyphClass 5
"01EB 01E6 0005 " + // 74: GlyphID 486..491 -> GlyphClass 5
"01F0 01F0 0004 " + // 80: GlyphID 496..496 -> GlyphClass 4
"01F8 01F6 0004 " + // 88: GlyphID 502..504 -> GlyphClass 4
"01FC 01FA 0004 " + // 92: GlyphID 506..508 -> GlyphClass 4
"0250 0250 0005 " + // 98: GlyphID 592..592 -> GlyphClass 5
"FFFF FFFF 0000 " + // 104: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 " + // 110: State[0][0..5]
"0000 0000 0000 0000 0001 0000 " + // 122: State[1][0..5]
"0000 0000 0001 0000 0001 0002 " + // 134: State[2][0..5]
// Entry table.
"0000 0000 " + // 146: Entries[0].NewState=0, .Flags=0
"FFFF " + // 150: Entries[0].CurrentInsertIndex=<None>
"FFFF " + // 152: Entries[0].MarkedInsertIndex=<None>
"0002 0000 " + // 154: Entries[1].NewState=0, .Flags=0
"FFFF " + // 158: Entries[1].CurrentInsertIndex=<None>
"FFFF " + // 160: Entries[1].MarkedInsertIndex=<None>
"0000 " + // 162: Entries[2].NewState=0
"2820 " + // 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
// .CurrentInsertCount=1, .MarkedInsertCount=0
"0000 " + // 166: Entries[1].CurrentInsertIndex=0
"FFFF " + // 168: Entries[1].MarkedInsertIndex=<None>
// Insertion action table.
"022F") // 170: InsertionActionTable[0]=GlyphID 559
tu.Assert(t, len(morxInsertionData) == 172)
out, _, err := ParseMorx(morxInsertionData, 910)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
insert, ok := subtable.Data.(MorxSubtableInsertion)
tu.Assert(t, ok)
machine := insert.AATStateTableExt
tu.Assert(t, machine.StateSize == 6)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 172, LastGlyph: 172, Value: 5},
{FirstGlyph: 486, LastGlyph: 491, Value: 5},
{FirstGlyph: 496, LastGlyph: 496, Value: 4},
{FirstGlyph: 502, LastGlyph: 504, Value: 4},
{FirstGlyph: 506, LastGlyph: 508, Value: 4},
{FirstGlyph: 592, LastGlyph: 592, Value: 5},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 110: State[0][0..5]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 122: State[1][0..5]
{0x0000, 0x0000, 0x0001, 0x0000, 0x0001, 0x0002}, // 134: State[2][0..5]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0x0002, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0, Flags: 0x2820, data: [4]byte{0, 0, 0xff, 0xff}},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
tu.Assert(t, reflect.DeepEqual(insert.Insertions, []GlyphID{0x022f}))
}
func TestParseKerx(t *testing.T) {
for _, filepath := range []string{
"toys/tables/kerx0.bin",
"toys/tables/kerx2.bin",
"toys/tables/kerx2bis.bin",
"toys/tables/kerx24.bin",
"toys/tables/kerx4-1.bin",
"toys/tables/kerx4-2.bin",
"toys/tables/kerx6Exp-VF.bin",
"toys/tables/kerx6-VF.bin",
} {
table, err := td.Files.ReadFile(filepath)
tu.AssertNoErr(t, err)
kerx, _, err := ParseKerx(table, 0xFF)
tu.AssertNoErr(t, err)
tu.Assert(t, len(kerx.Tables) > 0)
for _, subtable := range kerx.Tables {
tu.Assert(t, subtable.TupleCount > 0 == strings.Contains(filepath, "VF"))
switch data := subtable.Data.(type) {
case KerxData0:
tu.Assert(t, len(data.Pairs) > 0)
case KerxData2:
tu.Assert(t, data.Left != nil)
tu.Assert(t, data.Right != nil)
tu.Assert(t, int(data.KerningStart) <= len(data.KerningData))
case KerxData4:
tu.Assert(t, data.Anchors != nil)
}
}
}
}
func TestInvalidFeat(t *testing.T) {
// this is an invalid feat table, comming from a real font table (huh...)
table, err := td.Files.ReadFile("toys/tables/featInvalid.bin")
tu.AssertNoErr(t, err)
_, _, err = ParseFeat(table)
tu.Assert(t, err != nil)
}
func TestParseLtag(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ltag.bin")
tu.AssertNoErr(t, err)
ltag, _, err := ParseLtag(table)
tu.AssertNoErr(t, err)
tu.Assert(t, len(ltag.tagRange) == 1)
tu.Assert(t, ltag.Language(0) == "pl")
}
| {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
// extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
} | identifier_body |
aat_common_test.go | // SPDX-License-Identifier: Unlicense OR BSD-3-Clause
package tables
import (
"reflect"
"strings"
"testing"
td "github.com/go-text/typesetting-utils/opentype"
tu "github.com/go-text/typesetting/opentype/testutils"
)
func TestAATLookup4(t *testing.T) {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
// extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
}
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we omit the ‘f’ glyph, which makes AAT lookup format 2
// the most compact encoding;
//
// * at offsets 92..93 (offsets 56..57 in Apple’s document), we’ve
// changed the glyph class of the third segment from 5 to 6, which
// matches the values from the comments to the spec (but not the
// Apple’s hexdump).
morxLigatureData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00DA " + // 12: StructLength=218 (+8=226)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 00CA " + // 24: Subtable[0].StructLength=202 (+24=226)
"80 " + // 28: Subtable[0].CoverageFlags=0x80
"00 00 " + // 29: Subtable[0].Reserved=0
"02 " + // 31: Subtable[0].MorphType=2/LigatureMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
// State table header.
"0000 0007 " + // 36: STXHeader.ClassCount=7
"0000 001C " + // 40: STXHeader.ClassTableOffset=28 (+36=64)
"0000 0040 " + // 44: STXHeader.StateArrayOffset=64 (+36=100)
"0000 0078 " + // 48: STXHeader.EntryTableOffset=120 (+36=156)
"0000 0090 " + // 52: STXHeader.LigActionsOffset=144 (+36=180)
"0000 009C " + // 56: STXHeader.LigComponentsOffset=156 (+36=192)
"0000 00AE " + // 60: STXHeader.LigListOffset=174 (+36=210)
// Glyph class table.
"0002 0006 " + // 64: ClassTable.LookupFormat=2, .UnitSize=6
"0003 000C " + // 68: .NUnits=3, .SearchRange=12
"0001 0006 " + // 72: .EntrySelector=1, .RangeShift=6
"0016 0014 0004 " + // 76: GlyphID 20..22 [a..c] -> GlyphClass 4
"0018 0017 0005 " + // 82: GlyphID 23..24 [d..e] -> GlyphClass 5
"001C 001A 0006 " + // 88: GlyphID 26..28 [g..i] -> GlyphClass 6
"FFFF FFFF 0000 " + // 94: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 0000 " + // 100: State[0][0..6]
"0000 0000 0000 0000 0001 0000 0000 " + // 114: State[1][0..6]
"0000 0000 0000 0000 0001 0002 0000 " + // 128: State[2][0..6]
"0000 0000 0000 0000 0001 0002 0003 " + // 142: State[3][0..6]
// Entry table.
"0000 0000 " + // 156: Entries[0].NewState=0, .Flags=0
"0000 " + // 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
"0002 8000 " + // 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
"0000 " + // 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
"0003 8000 " + // 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
"0000 " + // 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
"0000 A000 " + // 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
"0000 " + // 178: Entries[3].ActionIndex=0 (start at Action[0])
// Ligature actions table.
"3FFF FFE7 " + // 180: Action[0].Flags=0, .GlyphIndexDelta=-25
"3FFF FFED " + // 184: Action[1].Flags=0, .GlyphIndexDelta=-19
"BFFF FFF2 " + // 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
// Ligature component table.
"0000 0001 " + // 192: LigComponent[0]=0, LigComponent[1]=1
"0002 0003 " + // 196: LigComponent[2]=2, LigComponent[3]=3
"0000 0004 " + // 200: LigComponent[4]=0, LigComponent[5]=4
"0000 0008 " + // 204: LigComponent[6]=0, LigComponent[7]=8
"0010 " + // 208: LigComponent[8]=16
// Ligature list.
"03E8 03E9 " + // 210: LigList[0]=1000, LigList[1]=1001
"03EA 03EB " + // 214: LigList[2]=1002, LigList[3]=1003
"03EC 03ED " + // 218: LigList[4]=1004, LigList[3]=1005
"03EE 03EF ") // 222: LigList[5]=1006, LigList[6]=1007
tu.Assert(t, len(morxLigatureData) == 226)
out, _, err := ParseMorx(morxLigatureData, 1515)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0x80
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
lig, ok := subtable.Data.(MorxSubtableLigature)
tu.Assert(t, ok)
machine := lig.AATStateTableExt
tu.Assert(t, machine.StateSize == 7)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 20, LastGlyph: 22, Value: 4},
{FirstGlyph: 23, LastGlyph: 24, Value: 5},
{FirstGlyph: 26, LastGlyph: 28, Value: 6},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[0][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[1][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0000}, // State[2][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0003}, // State[3][0..6]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0},
{NewState: 0x0002, Flags: 0x8000},
{NewState: 0x0003, Flags: 0x8000},
{NewState: 0, Flags: 0xA000},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
expLigActions := []uint32{
0x3FFFFFE7,
0x3FFFFFED,
0xBFFFFFF2,
}
expComponents := []uint16{0, 1, 2, 3, 0, 4, 0, 8, 16}
expLigatures := []GlyphID{
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
}
tu.Assert(t, reflect.DeepEqual(lig.LigActions, expLigActions))
tu.Assert(t, reflect.DeepEqual(lig.Components, expComponents))
tu.Assert(t, reflect.DeepEqual(lig.Ligatures, expLigatures))
}
func TestMorxInsertion(t *testing.T) {
// imported from fonttools
// Taken from the `morx` table of the second font in DevanagariSangamMN.ttc
// on macOS X 10.12.6; manually pruned to just contain the insertion lookup.
morxInsertionData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00A4 " + // 12: StructLength=164 (+8=172)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 0094 " + // 24: Subtable[0].StructLength=148 (+24=172)
"00 " + // 28: Subtable[0].CoverageFlags=0x00
"00 00 " + // 29: Subtable[0].Reserved=0
"05 " + // 31: Subtable[0].MorphType=5/InsertionMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
"0000 0006 " + // 36: STXHeader.ClassCount=6
"0000 0014 " + // 40: STXHeader.ClassTableOffset=20 (+36=56)
"0000 004A " + // 44: STXHeader.StateArrayOffset=74 (+36=110)
"0000 006E " + // 48: STXHeader.EntryTableOffset=110 (+36=146)
"0000 0086 " + // 52: STXHeader.InsertionActionOffset=134 (+36=170)
// Glyph class table.
"0002 0006 " + // 56: ClassTable.LookupFormat=2, .UnitSize=6
"0006 0018 " + // 60: .NUnits=6, .SearchRange=24
"0002 000C " + // 64: .EntrySelector=2, .RangeShift=12
"00AC 00AC 0005 " + // 68: GlyphID 172..172 -> GlyphClass 5
"01EB 01E6 0005 " + // 74: GlyphID 486..491 -> GlyphClass 5
"01F0 01F0 0004 " + // 80: GlyphID 496..496 -> GlyphClass 4
"01F8 01F6 0004 " + // 88: GlyphID 502..504 -> GlyphClass 4
"01FC 01FA 0004 " + // 92: GlyphID 506..508 -> GlyphClass 4
"0250 0250 0005 " + // 98: GlyphID 592..592 -> GlyphClass 5
"FFFF FFFF 0000 " + // 104: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 " + // 110: State[0][0..5]
"0000 0000 0000 0000 0001 0000 " + // 122: State[1][0..5]
"0000 0000 0001 0000 0001 0002 " + // 134: State[2][0..5]
// Entry table.
"0000 0000 " + // 146: Entries[0].NewState=0, .Flags=0
"FFFF " + // 150: Entries[0].CurrentInsertIndex=<None>
"FFFF " + // 152: Entries[0].MarkedInsertIndex=<None>
"0002 0000 " + // 154: Entries[1].NewState=0, .Flags=0
"FFFF " + // 158: Entries[1].CurrentInsertIndex=<None>
"FFFF " + // 160: Entries[1].MarkedInsertIndex=<None>
"0000 " + // 162: Entries[2].NewState=0
"2820 " + // 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
// .CurrentInsertCount=1, .MarkedInsertCount=0
"0000 " + // 166: Entries[1].CurrentInsertIndex=0
"FFFF " + // 168: Entries[1].MarkedInsertIndex=<None>
// Insertion action table.
"022F") // 170: InsertionActionTable[0]=GlyphID 559
tu.Assert(t, len(morxInsertionData) == 172)
out, _, err := ParseMorx(morxInsertionData, 910)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
insert, ok := subtable.Data.(MorxSubtableInsertion)
tu.Assert(t, ok)
machine := insert.AATStateTableExt
tu.Assert(t, machine.StateSize == 6)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 172, LastGlyph: 172, Value: 5},
{FirstGlyph: 486, LastGlyph: 491, Value: 5},
{FirstGlyph: 496, LastGlyph: 496, Value: 4},
{FirstGlyph: 502, LastGlyph: 504, Value: 4},
{FirstGlyph: 506, LastGlyph: 508, Value: 4},
{FirstGlyph: 592, LastGlyph: 592, Value: 5},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 110: State[0][0..5]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 122: State[1][0..5]
{0x0000, 0x0000, 0x0001, 0x0000, 0x0001, 0x0002}, // 134: State[2][0..5]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0x0002, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0, Flags: 0x2820, data: [4]byte{0, 0, 0xff, 0xff}},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
tu.Assert(t, reflect.DeepEqual(insert.Insertions, []GlyphID{0x022f}))
}
func TestParseKerx(t *testing.T) {
for _, filepath := range []string{
"toys/tables/kerx0.bin",
"toys/tables/kerx2.bin",
"toys/tables/kerx2bis.bin",
"toys/tables/kerx24.bin",
"toys/tables/kerx4-1.bin",
"toys/tables/kerx4-2.bin",
"toys/tables/kerx6Exp-VF.bin",
"toys/tables/kerx6-VF.bin",
} {
table, err := td.Files.Rea | sting.T) {
// this is an invalid feat table, comming from a real font table (huh...)
table, err := td.Files.ReadFile("toys/tables/featInvalid.bin")
tu.AssertNoErr(t, err)
_, _, err = ParseFeat(table)
tu.Assert(t, err != nil)
}
func TestParseLtag(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ltag.bin")
tu.AssertNoErr(t, err)
ltag, _, err := ParseLtag(table)
tu.AssertNoErr(t, err)
tu.Assert(t, len(ltag.tagRange) == 1)
tu.Assert(t, ltag.Language(0) == "pl")
}
| dFile(filepath)
tu.AssertNoErr(t, err)
kerx, _, err := ParseKerx(table, 0xFF)
tu.AssertNoErr(t, err)
tu.Assert(t, len(kerx.Tables) > 0)
for _, subtable := range kerx.Tables {
tu.Assert(t, subtable.TupleCount > 0 == strings.Contains(filepath, "VF"))
switch data := subtable.Data.(type) {
case KerxData0:
tu.Assert(t, len(data.Pairs) > 0)
case KerxData2:
tu.Assert(t, data.Left != nil)
tu.Assert(t, data.Right != nil)
tu.Assert(t, int(data.KerningStart) <= len(data.KerningData))
case KerxData4:
tu.Assert(t, data.Anchors != nil)
}
}
}
}
func TestInvalidFeat(t *te | conditional_block |
aat_common_test.go | // SPDX-License-Identifier: Unlicense OR BSD-3-Clause
package tables
import (
"reflect"
"strings"
"testing"
td "github.com/go-text/typesetting-utils/opentype"
tu "github.com/go-text/typesetting/opentype/testutils"
)
func TestAATLookup4(t *testing.T) {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
// extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
}
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we omit the ‘f’ glyph, which makes AAT lookup format 2
// the most compact encoding;
//
// * at offsets 92..93 (offsets 56..57 in Apple’s document), we’ve
// changed the glyph class of the third segment from 5 to 6, which
// matches the values from the comments to the spec (but not the
// Apple’s hexdump).
morxLigatureData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00DA " + // 12: StructLength=218 (+8=226)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 00CA " + // 24: Subtable[0].StructLength=202 (+24=226)
"80 " + // 28: Subtable[0].CoverageFlags=0x80
"00 00 " + // 29: Subtable[0].Reserved=0
"02 " + // 31: Subtable[0].MorphType=2/LigatureMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
// State table header.
"0000 0007 " + // 36: STXHeader.ClassCount=7
"0000 001C " + // 40: STXHeader.ClassTableOffset=28 (+36=64)
"0000 0040 " + // 44: STXHeader.StateArrayOffset=64 (+36=100)
"0000 0078 " + // 48: STXHeader.EntryTableOffset=120 (+36=156)
"0000 0090 " + // 52: STXHeader.LigActionsOffset=144 (+36=180)
"0000 009C " + // 56: STXHeader.LigComponentsOffset=156 (+36=192)
"0000 00AE " + // 60: STXHeader.LigListOffset=174 (+36=210)
// Glyph class table.
"0002 0006 " + // 64: ClassTable.LookupFormat=2, .UnitSize=6
"0003 000C " + // 68: .NUnits=3, .SearchRange=12
"0001 0006 " + // 72: .EntrySelector=1, .RangeShift=6
"0016 0014 0004 " + // 76: GlyphID 20..22 [a..c] -> GlyphClass 4
"0018 0017 0005 " + // 82: GlyphID 23..24 [d..e] -> GlyphClass 5
"001C 001A 0006 " + // 88: GlyphID 26..28 [g..i] -> GlyphClass 6
"FFFF FFFF 0000 " + // 94: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 0000 " + // 100: State[0][0..6]
"0000 0000 0000 0000 0001 0000 0000 " + // 114: State[1][0..6]
"0000 0000 0000 0000 0001 0002 0000 " + // 128: State[2][0..6]
"0000 0000 0000 0000 0001 0002 0003 " + // 142: State[3][0..6]
// Entry table.
"0000 0000 " + // 156: Entries[0].NewState=0, .Flags=0
"0000 " + // 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
"0002 8000 " + // 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
"0000 " + // 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
"0003 8000 " + // 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
"0000 " + // 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
"0000 A000 " + // 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
"0000 " + // 178: Entries[3].ActionIndex=0 (start at Action[0])
// Ligature actions table.
"3FFF FFE7 " + // 180: Action[0].Flags=0, .GlyphIndexDelta=-25
"3FFF FFED " + // 184: Action[1].Flags=0, .GlyphIndexDelta=-19
"BFFF FFF2 " + // 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
// Ligature component table.
"0000 0001 " + // 192: LigComponent[0]=0, LigComponent[1]=1
"0002 0003 " + // 196: LigComponent[2]=2, LigComponent[3]=3
"0000 0004 " + // 200: LigComponent[4]=0, LigComponent[5]=4
"0000 0008 " + // 204: LigComponent[6]=0, LigComponent[7]=8
"0010 " + // 208: LigComponent[8]=16
// Ligature list.
"03E8 03E9 " + // 210: LigList[0]=1000, LigList[1]=1001
"03EA 03EB " + // 214: LigList[2]=1002, LigList[3]=1003
"03EC 03ED " + // 218: LigList[4]=1004, LigList[3]=1005
"03EE 03EF ") // 222: LigList[5]=1006, LigList[6]=1007
tu.Assert(t, len(morxLigatureData) == 226)
out, _, err := ParseMorx(morxLigatureData, 1515)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0x80
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
lig, ok := subtable.Data.(MorxSubtableLigature)
tu.Assert(t, ok)
machine := lig.AATStateTableExt
tu.Assert(t, machine.StateSize == 7)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 20, LastGlyph: 22, Value: 4},
{FirstGlyph: 23, LastGlyph: 24, Value: 5}, | {FirstGlyph: 26, LastGlyph: 28, Value: 6},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[0][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000}, // State[1][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0000}, // State[2][0..6]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0003}, // State[3][0..6]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0},
{NewState: 0x0002, Flags: 0x8000},
{NewState: 0x0003, Flags: 0x8000},
{NewState: 0, Flags: 0xA000},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
expLigActions := []uint32{
0x3FFFFFE7,
0x3FFFFFED,
0xBFFFFFF2,
}
expComponents := []uint16{0, 1, 2, 3, 0, 4, 0, 8, 16}
expLigatures := []GlyphID{
1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
}
tu.Assert(t, reflect.DeepEqual(lig.LigActions, expLigActions))
tu.Assert(t, reflect.DeepEqual(lig.Components, expComponents))
tu.Assert(t, reflect.DeepEqual(lig.Ligatures, expLigatures))
}
// TestMorxInsertion decodes a hand-assembled 'morx' insertion subtable and
// checks every decoded field against the values spelled out in the hex dump.
func TestMorxInsertion(t *testing.T) {
// imported from fonttools
// Taken from the `morx` table of the second font in DevanagariSangamMN.ttc
// on macOS X 10.12.6; manually pruned to just contain the insertion lookup.
morxInsertionData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00A4 " + // 12: StructLength=164 (+8=172)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 0094 " + // 24: Subtable[0].StructLength=148 (+24=172)
"00 " + // 28: Subtable[0].CoverageFlags=0x00
"00 00 " + // 29: Subtable[0].Reserved=0
"05 " + // 31: Subtable[0].MorphType=5/InsertionMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
"0000 0006 " + // 36: STXHeader.ClassCount=6
"0000 0014 " + // 40: STXHeader.ClassTableOffset=20 (+36=56)
"0000 004A " + // 44: STXHeader.StateArrayOffset=74 (+36=110)
"0000 006E " + // 48: STXHeader.EntryTableOffset=110 (+36=146)
"0000 0086 " + // 52: STXHeader.InsertionActionOffset=134 (+36=170)
// Glyph class table.
"0002 0006 " + // 56: ClassTable.LookupFormat=2, .UnitSize=6
"0006 0018 " + // 60: .NUnits=6, .SearchRange=24
"0002 000C " + // 64: .EntrySelector=2, .RangeShift=12
"00AC 00AC 0005 " + // 68: GlyphID 172..172 -> GlyphClass 5
"01EB 01E6 0005 " + // 74: GlyphID 486..491 -> GlyphClass 5
"01F0 01F0 0004 " + // 80: GlyphID 496..496 -> GlyphClass 4
"01F8 01F6 0004 " + // 88: GlyphID 502..504 -> GlyphClass 4
"01FC 01FA 0004 " + // 92: GlyphID 506..508 -> GlyphClass 4
"0250 0250 0005 " + // 98: GlyphID 592..592 -> GlyphClass 5
"FFFF FFFF 0000 " + // 104: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 " + // 110: State[0][0..5]
"0000 0000 0000 0000 0001 0000 " + // 122: State[1][0..5]
"0000 0000 0001 0000 0001 0002 " + // 134: State[2][0..5]
// Entry table.
"0000 0000 " + // 146: Entries[0].NewState=0, .Flags=0
"FFFF " + // 150: Entries[0].CurrentInsertIndex=<None>
"FFFF " + // 152: Entries[0].MarkedInsertIndex=<None>
"0002 0000 " + // 154: Entries[1].NewState=0, .Flags=0
"FFFF " + // 158: Entries[1].CurrentInsertIndex=<None>
"FFFF " + // 160: Entries[1].MarkedInsertIndex=<None>
"0000 " + // 162: Entries[2].NewState=0
"2820 " + // 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
// .CurrentInsertCount=1, .MarkedInsertCount=0
"0000 " + // 166: Entries[1].CurrentInsertIndex=0
"FFFF " + // 168: Entries[1].MarkedInsertIndex=<None>
// Insertion action table.
"022F") // 170: InsertionActionTable[0]=GlyphID 559
// Sanity-check the fixture length matches the offsets annotated above.
tu.Assert(t, len(morxInsertionData) == 172)
out, _, err := ParseMorx(morxInsertionData, 910)
tu.AssertNoErr(t, err)
// One chain, one insertion subtable, with the flags encoded in the dump.
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
insert, ok := subtable.Data.(MorxSubtableInsertion)
tu.Assert(t, ok)
machine := insert.AATStateTableExt
tu.Assert(t, machine.StateSize == 6)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
// Expected glyph class records mirror the class table bytes above.
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 172, LastGlyph: 172, Value: 5},
{FirstGlyph: 486, LastGlyph: 491, Value: 5},
{FirstGlyph: 496, LastGlyph: 496, Value: 4},
{FirstGlyph: 502, LastGlyph: 504, Value: 4},
{FirstGlyph: 506, LastGlyph: 508, Value: 4},
{FirstGlyph: 592, LastGlyph: 592, Value: 5},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 110: State[0][0..5]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 122: State[1][0..5]
{0x0000, 0x0000, 0x0001, 0x0000, 0x0001, 0x0002}, // 134: State[2][0..5]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
// The raw 4-byte payload of each entry holds the insert indices.
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0x0002, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0, Flags: 0x2820, data: [4]byte{0, 0, 0xff, 0xff}},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
tu.Assert(t, reflect.DeepEqual(insert.Insertions, []GlyphID{0x022f}))
}
// TestParseKerx parses a collection of real 'kerx' tables and performs
// structural sanity checks on every subtable it finds.
func TestParseKerx(t *testing.T) {
	files := []string{
		"toys/tables/kerx0.bin",
		"toys/tables/kerx2.bin",
		"toys/tables/kerx2bis.bin",
		"toys/tables/kerx24.bin",
		"toys/tables/kerx4-1.bin",
		"toys/tables/kerx4-2.bin",
		"toys/tables/kerx6Exp-VF.bin",
		"toys/tables/kerx6-VF.bin",
	}
	for _, path := range files {
		raw, err := td.Files.ReadFile(path)
		tu.AssertNoErr(t, err)
		kerx, _, err := ParseKerx(raw, 0xFF)
		tu.AssertNoErr(t, err)
		tu.Assert(t, len(kerx.Tables) > 0)
		for _, sub := range kerx.Tables {
			// Only the variable-font samples (names containing "VF")
			// are expected to carry tuple data.
			isVF := strings.Contains(path, "VF")
			tu.Assert(t, sub.TupleCount > 0 == isVF)
			switch data := sub.Data.(type) {
			case KerxData0:
				tu.Assert(t, len(data.Pairs) > 0)
			case KerxData2:
				tu.Assert(t, data.Left != nil)
				tu.Assert(t, data.Right != nil)
				tu.Assert(t, int(data.KerningStart) <= len(data.KerningData))
			case KerxData4:
				tu.Assert(t, data.Anchors != nil)
			}
		}
	}
}
// TestInvalidFeat checks that parsing a malformed 'feat' table reports an
// error instead of silently succeeding.
func TestInvalidFeat(t *testing.T) {
	// This invalid 'feat' table comes from a real font (huh...).
	content, err := td.Files.ReadFile("toys/tables/featInvalid.bin")
	tu.AssertNoErr(t, err)
	_, _, err = ParseFeat(content)
	tu.Assert(t, err != nil)
}
// TestParseLtag round-trips a known-good 'ltag' table: it should contain a
// single tag range and map language index 0 to "pl".
func TestParseLtag(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ltag.bin")
tu.AssertNoErr(t, err)
ltag, _, err := ParseLtag(table)
tu.AssertNoErr(t, err)
tu.Assert(t, len(ltag.tagRange) == 1)
tu.Assert(t, ltag.Language(0) == "pl")
} | random_line_split |
args.go | // Copyright 2018 The go-python Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Argument parsing for Go functions called by python
//
// These functions are useful when creating your own extensions
// functions and methods. Additional information and examples are
// available in Extending and Embedding the Python Interpreter.
//
// The first three of these functions described, PyArg_ParseTuple(),
// PyArg_ParseTupleAndKeywords(), and PyArg_Parse(), all use format
// strings which are used to tell the function about the expected
// arguments. The format strings use the same syntax for each of these
// functions.
//
// Parsing arguments
//
// A format string consists of zero or more “format units.” A format
// unit describes one Python object; it is usually a single character
// or a parenthesized sequence of format units. With a few exceptions,
// a format unit that is not a parenthesized sequence normally
// corresponds to a single address argument to these functions. In the
// following description, the quoted form is the format unit; the
// entry in (round) parentheses is the Python object type that matches
// the format unit; and the entry in [square] brackets is the type of
// the C variable(s) whose address should be passed.
//
// s (str) [const char *]
//
// Convert a Unicode object to a C pointer to a character string. A
// pointer to an existing string is stored in the character pointer
// variable whose address you pass. The C string is
// NUL-terminated. The Python string must not contain embedded NUL
// bytes; if it does, a TypeError exception is raised. Unicode objects
// are converted to C strings using 'utf-8' encoding. If this
// conversion fails, a UnicodeError is raised.
//
// Note This format does not accept bytes-like objects. If you want to
// accept filesystem paths and convert them to C character strings, it
// is preferable to use the O& format with PyUnicode_FSConverter() as
// converter.
//
// s* (str, bytes, bytearray or buffer compatible object) [Py_buffer]
//
// This format accepts Unicode objects as well as bytes-like
// objects. It fills a Py_buffer structure provided by the caller. In
// this case the resulting C string may contain embedded NUL
// bytes. Unicode objects are converted to C strings using 'utf-8'
// encoding.
//
// s# (str, bytes or read-only buffer compatible object) [const char *, int or Py_ssize_t]
//
// Like s*, except that it doesn’t accept mutable buffer-like objects
// such as bytearray. The result is stored into two C variables, the
// first one a pointer to a C string, the second one its length. The
// string may contain embedded null bytes. Unicode objects are
// converted to C strings using 'utf-8' encoding.
//
// z (str or None) [const char *]
//
// Like s, but the Python object may also be None, in which case the C
// pointer is set to NULL.
//
// z* (str, bytes, bytearray, buffer compatible object or None)
// [Py_buffer]
//
// Like s*, but the Python object may also be None, in which case the
// buf member of the Py_buffer structure is set to NULL.
//
// z# (str, bytes, read-only buffer compatible object or None) [const
// char *, int]
//
// Like s#, but the Python object may also be None, in which case the
// C pointer is set to NULL.
//
// y (bytes) [const char *]
//
// This format converts a bytes-like object to a C pointer to a
// character string; it does not accept Unicode objects. The bytes
// buffer must not contain embedded NUL bytes; if it does, a TypeError
// exception is raised.
//
// y* (bytes, bytearray or bytes-like object) [Py_buffer]
//
// This variant on s* doesn’t accept Unicode objects, only bytes-like
// objects. This is the recommended way to accept binary data.
//
// y# (bytes) [const char *, int]
//
// This variant on s# doesn’t accept Unicode objects, only bytes-like
// objects.
//
// S (bytes) [PyBytesObject *]
//
// Requires that the Python object is a bytes object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytes object. The C variable may also be declared as PyObject*.
//
// Y (bytearray) [PyByteArrayObject *]
//
// Requires that the Python object is a bytearray object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytearray object. The C variable may also be declared as PyObject*.
//
// u (str) [Py_UNICODE *]
//
// Convert a Python Unicode object to a C pointer to a NUL-terminated
// buffer of Unicode characters. You must pass the address of a
// Py_UNICODE pointer variable, which will be filled with the pointer
// to an existing Unicode buffer. Please note that the width of a
// Py_UNICODE character depends on compilation options (it is either
// 16 or 32 bits). The Python string must not contain embedded NUL
// characters; if it does, a TypeError exception is raised.
//
// Note Since u doesn’t give you back the length of the string, and it
// may contain embedded NUL characters, it is recommended to use u# or
// U instead.
//
// u# (str) [Py_UNICODE *, int]
//
// This variant on u stores into two C variables, the first one a
// pointer to a Unicode data buffer, the second one its length.
//
// Z (str or None) [Py_UNICODE *]
//
// Like u, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// Z# (str or None) [Py_UNICODE *, int]
//
// Like u#, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// U (str) [PyObject *]
//
// Requires that the Python object is a Unicode object, without
// attempting any conversion. Raises TypeError if the object is not a
// Unicode object. The C variable may also be declared as PyObject*.
//
// w* (bytearray or read-write byte-oriented buffer) [Py_buffer]
//
// This format accepts any object which implements the read-write
// buffer interface. It fills a Py_buffer structure provided by the
// caller. The buffer may contain embedded null bytes. The caller has
// to call PyBuffer_Release() when it is done with the buffer.
//
// es (str) [const char *encoding, char **buffer]
//
// This variant on s is used for encoding Unicode into a character
// buffer. It only works for encoded data without embedded NUL bytes.
//
// This format requires two arguments. The first is only used as
// input, and must be a const char* which points to the name of an
// encoding as a NUL-terminated string, or NULL, in which case 'utf-8'
// encoding is used. An exception is raised if the named encoding is
// not known to Python. The second argument must be a char**; the
// value of the pointer it references will be set to a buffer with the
// contents of the argument text. The text will be encoded in the
// encoding specified by the first argument.
//
// PyArg_ParseTuple() will allocate a buffer of the needed size, copy
// the encoded data into this buffer and adjust *buffer to reference
// the newly allocated storage. The caller is responsible for calling
// PyMem_Free() to free the allocated buffer after use.
//
// et (str, bytes or bytearray) [const char *encoding, char **buffer]
//
// Same as es except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// es# (str) [const char *encoding, char **buffer, int *buffer_length]
//
// This variant on s# is used for encoding Unicode into a character
// buffer. Unlike the es format, this variant allows input data which
// contains NUL characters.
//
// It requires three arguments. The first is only used as input, and
// must be a const char* which points to the name of an encoding as a
// NUL-terminated string, or NULL, in which case 'utf-8' encoding is
// used. An exception is raised if the named encoding is not known to
// Python. The second argument must be a char**; the value of the
// pointer it references will be set to a buffer with the contents of
// the argument text. The text will be encoded in the encoding
// specified by the first argument. The third argument must be a
// pointer to an integer; the referenced integer will be set to the
// number of bytes in the output buffer.
//
// There are two modes of operation:
//
// If *buffer points a NULL pointer, the function will allocate a
// buffer of the needed size, copy the encoded data into this buffer
// and set *buffer to reference the newly allocated storage. The
// caller is responsible for calling PyMem_Free() to free the
// allocated buffer after usage.
//
// If *buffer points to a non-NULL pointer (an already allocated
// buffer), PyArg_ParseTuple() will use this location as the buffer
// and interpret the initial value of *buffer_length as the buffer
// size. It will then copy the encoded data into the buffer and
// NUL-terminate it. If the buffer is not large enough, a ValueError
// will be set.
//
// In both cases, *buffer_length is set to the length of the encoded
// data without the trailing NUL byte.
//
// et# (str, bytes or bytearray) [const char *encoding, char **buffer,
// int *buffer_length]
//
// Same as es# except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// PyObject*) into which the object pointer is stored. If the Python
// object does not have the required type, TypeError is raised.
//
// O& (object) [converter, anything]
//
// Convert a Python object to a C variable through a converter
// function. This takes two arguments: the first is a function, the
// second is the address of a C variable (of arbitrary type),
// converted to void *. The converter function in turn is called as
// follows:
//
// status = converter(object, address);
//
// where object is the Python object to be converted and address is
// the void* argument that was passed to the PyArg_Parse*()
// function. The returned status should be 1 for a successful
// conversion and 0 if the conversion has failed. When the conversion
// fails, the converter function should raise an exception and leave
// the content of address unmodified.
//
// If the converter returns Py_CLEANUP_SUPPORTED, it may get called a
// second time if the argument parsing eventually fails, giving the
// converter a chance to release any memory that it had already
// allocated. In this second call, the object parameter will be NULL;
// address will have the same value as in the original call.
//
// Changed in version 3.1: Py_CLEANUP_SUPPORTED was added.
//
// p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords
// ParseTupleAndKeywords parses the positional args and keyword kwargs
// against a PyArg-style format string, storing each converted value
// through the corresponding entry of results. kwlist names the
// parameters in order; when non-nil it must be the same length as
// results. Result slots whose argument is absent are left untouched,
// so callers pre-load them with defaults. Returns a TypeError on any
// mismatch.
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
// Stack buffer so short format strings avoid a heap allocation.
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
// Look the parameter up by keyword first; a positional arg may
// override (or conflict with) it below.
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := results[i]
// Type-check (and for 'd', convert) according to the format unit.
switch op.code {
case 'O':
*result = arg
case 'Z':
switch op.modifier {
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
case '#', 0:
switch arg := arg.(type) {
case String, NoneType:
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'z':
switch op.modifier {
default:
switch arg := arg.(type) {
case String, NoneType:
// ok
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes, NoneType:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'U':
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 's':
switch op.modifier {
default:
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'y':
switch op.modifier {
default:
if _, ok := arg.(Bytes); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'i', 'n':
if _, ok := arg.(Int); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'p':
if _, ok := arg.(Bool); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'd':
// 'd' is the only unit that converts: Int is widened to Float.
switch x := arg.(type) {
case Int:
*result = Float(x)
case Float:
*result = x
default:
return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
}
default:
// NOTE(review): op is a formatOp struct, so %q renders both byte
// fields (e.g. {'x' '\x00'}) — presumably op.code alone was
// intended; confirm the desired message format.
return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
}
}
return nil
}
// Parse tuple only
func ParseTuple(args Tuple, format | sults ...*Object) error {
return ParseTupleAndKeywords(args, nil, format, nil, results...)
}
// formatOp is a single parsed unit of a PyArg-style format string:
// the format character itself plus its optional trailing modifier.
type formatOp struct {
code byte // format character, e.g. 's', 'z', 'O'
modifier byte // '*' or '#' when present, 0 otherwise
}
// parseFormat splits a PyArg-style format string into its format units.
//
// It returns the minimum number of required arguments (units after '|'
// are optional), the function name used in error messages (the text
// after ':' or ';', defaulting to "function"), the index of the first
// keyword-only unit (units after '$'; 0xFFFF when absent), and the
// parsed units themselves, appended into the caller-supplied buffer.
func parseFormat(format string, in []formatOp) (min int, name string, kwOnly_i int, ops []formatOp) {
	name = "function"
	min = -1
	kwOnly_i = 0xFFFF
	ops = in[:0]
	n := len(format)
	i := 0
	for i < n {
		op := formatOp{code: format[i]}
		i++
		// An immediately following '*' or '#' modifies this unit.
		if i < n && (format[i] == '*' || format[i] == '#') {
			op.modifier = format[i]
			i++
		}
		switch op.code {
		case ':', ';':
			// Everything after the separator is the function name.
			name = format[i:]
			i = n
		case '$':
			// Remaining units are keyword-only.
			kwOnly_i = len(ops)
		case '|':
			// Remaining units are optional.
			min = len(ops)
		default:
			ops = append(ops, op)
		}
	}
	if min < 0 {
		min = len(ops)
	}
	return
}
// checkNumberOfArgs validates that nargs lies within [min, max] and that
// enough result slots (nresults) were supplied to receive them, returning
// a TypeError styled after CPython's argument-count messages otherwise.
func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {
	switch {
	case min == max && nargs != max:
		// Exact arity required.
		return ExceptionNewf(TypeError, "%s() takes exactly %d arguments (%d given)", name, max, nargs)
	case min != max && nargs > max:
		return ExceptionNewf(TypeError, "%s() takes at most %d arguments (%d given)", name, max, nargs)
	case min != max && nargs < min:
		return ExceptionNewf(TypeError, "%s() takes at least %d arguments (%d given)", name, min, nargs)
	}
	if nargs > nresults {
		return ExceptionNewf(TypeError, "Internal error: not enough arguments supplied to Unpack*/Parse*")
	}
	return nil
}
// UnpackTuple copies the positional args into the supplied results.
//
// Keyword arguments are rejected outright, and the argument count must
// lie within [min, max]. Defaults are the caller's responsibility:
// result slots without a matching argument are left untouched.
func UnpackTuple(args Tuple, kwargs StringDict, name string, min int, max int, results ...*Object) error {
	if len(kwargs) != 0 {
		return ExceptionNewf(TypeError, "%s() does not take keyword arguments", name)
	}
	// Check number of arguments
	if err := checkNumberOfArgs(name, len(args), len(results), min, max); err != nil {
		return err
	}
	// Copy each supplied argument into its result slot.
	for i, arg := range args {
		*results[i] = arg
	}
	return nil
}
| string, re | identifier_name |
args.go | // Copyright 2018 The go-python Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Argument parsing for Go functions called by python
//
// These functions are useful when creating your own extensions
// functions and methods. Additional information and examples are
// available in Extending and Embedding the Python Interpreter.
//
// The first three of these functions described, PyArg_ParseTuple(),
// PyArg_ParseTupleAndKeywords(), and PyArg_Parse(), all use format
// strings which are used to tell the function about the expected
// arguments. The format strings use the same syntax for each of these
// functions.
//
// Parsing arguments
//
// A format string consists of zero or more “format units.” A format
// unit describes one Python object; it is usually a single character
// or a parenthesized sequence of format units. With a few exceptions,
// a format unit that is not a parenthesized sequence normally
// corresponds to a single address argument to these functions. In the
// following description, the quoted form is the format unit; the
// entry in (round) parentheses is the Python object type that matches
// the format unit; and the entry in [square] brackets is the type of
// the C variable(s) whose address should be passed.
//
// s (str) [const char *]
//
// Convert a Unicode object to a C pointer to a character string. A
// pointer to an existing string is stored in the character pointer
// variable whose address you pass. The C string is
// NUL-terminated. The Python string must not contain embedded NUL
// bytes; if it does, a TypeError exception is raised. Unicode objects
// are converted to C strings using 'utf-8' encoding. If this
// conversion fails, a UnicodeError is raised.
//
// Note This format does not accept bytes-like objects. If you want to
// accept filesystem paths and convert them to C character strings, it
// is preferable to use the O& format with PyUnicode_FSConverter() as
// converter.
//
// s* (str, bytes, bytearray or buffer compatible object) [Py_buffer]
//
// This format accepts Unicode objects as well as bytes-like
// objects. It fills a Py_buffer structure provided by the caller. In
// this case the resulting C string may contain embedded NUL
// bytes. Unicode objects are converted to C strings using 'utf-8'
// encoding.
//
// s# (str, bytes or read-only buffer compatible object) [const char *, int or Py_ssize_t]
//
// Like s*, except that it doesn’t accept mutable buffer-like objects
// such as bytearray. The result is stored into two C variables, the
// first one a pointer to a C string, the second one its length. The
// string may contain embedded null bytes. Unicode objects are
// converted to C strings using 'utf-8' encoding.
//
// z (str or None) [const char *]
//
// Like s, but the Python object may also be None, in which case the C
// pointer is set to NULL.
//
// z* (str, bytes, bytearray, buffer compatible object or None)
// [Py_buffer]
//
// Like s*, but the Python object may also be None, in which case the
// buf member of the Py_buffer structure is set to NULL.
//
// z# (str, bytes, read-only buffer compatible object or None) [const
// char *, int]
//
// Like s#, but the Python object may also be None, in which case the
// C pointer is set to NULL.
//
// y (bytes) [const char *]
// | // This format converts a bytes-like object to a C pointer to a
// character string; it does not accept Unicode objects. The bytes
// buffer must not contain embedded NUL bytes; if it does, a TypeError
// exception is raised.
//
// y* (bytes, bytearray or bytes-like object) [Py_buffer]
//
// This variant on s* doesn’t accept Unicode objects, only bytes-like
// objects. This is the recommended way to accept binary data.
//
// y# (bytes) [const char *, int]
//
// This variant on s# doesn’t accept Unicode objects, only bytes-like
// objects.
//
// S (bytes) [PyBytesObject *]
//
// Requires that the Python object is a bytes object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytes object. The C variable may also be declared as PyObject*.
//
// Y (bytearray) [PyByteArrayObject *]
//
// Requires that the Python object is a bytearray object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytearray object. The C variable may also be declared as PyObject*.
//
// u (str) [Py_UNICODE *]
//
// Convert a Python Unicode object to a C pointer to a NUL-terminated
// buffer of Unicode characters. You must pass the address of a
// Py_UNICODE pointer variable, which will be filled with the pointer
// to an existing Unicode buffer. Please note that the width of a
// Py_UNICODE character depends on compilation options (it is either
// 16 or 32 bits). The Python string must not contain embedded NUL
// characters; if it does, a TypeError exception is raised.
//
// Note Since u doesn’t give you back the length of the string, and it
// may contain embedded NUL characters, it is recommended to use u# or
// U instead.
//
// u# (str) [Py_UNICODE *, int]
//
// This variant on u stores into two C variables, the first one a
// pointer to a Unicode data buffer, the second one its length.
//
// Z (str or None) [Py_UNICODE *]
//
// Like u, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// Z# (str or None) [Py_UNICODE *, int]
//
// Like u#, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// U (str) [PyObject *]
//
// Requires that the Python object is a Unicode object, without
// attempting any conversion. Raises TypeError if the object is not a
// Unicode object. The C variable may also be declared as PyObject*.
//
// w* (bytearray or read-write byte-oriented buffer) [Py_buffer]
//
// This format accepts any object which implements the read-write
// buffer interface. It fills a Py_buffer structure provided by the
// caller. The buffer may contain embedded null bytes. The caller has
// to call PyBuffer_Release() when it is done with the buffer.
//
// es (str) [const char *encoding, char **buffer]
//
// This variant on s is used for encoding Unicode into a character
// buffer. It only works for encoded data without embedded NUL bytes.
//
// This format requires two arguments. The first is only used as
// input, and must be a const char* which points to the name of an
// encoding as a NUL-terminated string, or NULL, in which case 'utf-8'
// encoding is used. An exception is raised if the named encoding is
// not known to Python. The second argument must be a char**; the
// value of the pointer it references will be set to a buffer with the
// contents of the argument text. The text will be encoded in the
// encoding specified by the first argument.
//
// PyArg_ParseTuple() will allocate a buffer of the needed size, copy
// the encoded data into this buffer and adjust *buffer to reference
// the newly allocated storage. The caller is responsible for calling
// PyMem_Free() to free the allocated buffer after use.
//
// et (str, bytes or bytearray) [const char *encoding, char **buffer]
//
// Same as es except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// es# (str) [const char *encoding, char **buffer, int *buffer_length]
//
// This variant on s# is used for encoding Unicode into a character
// buffer. Unlike the es format, this variant allows input data which
// contains NUL characters.
//
// It requires three arguments. The first is only used as input, and
// must be a const char* which points to the name of an encoding as a
// NUL-terminated string, or NULL, in which case 'utf-8' encoding is
// used. An exception is raised if the named encoding is not known to
// Python. The second argument must be a char**; the value of the
// pointer it references will be set to a buffer with the contents of
// the argument text. The text will be encoded in the encoding
// specified by the first argument. The third argument must be a
// pointer to an integer; the referenced integer will be set to the
// number of bytes in the output buffer.
//
// There are two modes of operation:
//
// If *buffer points a NULL pointer, the function will allocate a
// buffer of the needed size, copy the encoded data into this buffer
// and set *buffer to reference the newly allocated storage. The
// caller is responsible for calling PyMem_Free() to free the
// allocated buffer after usage.
//
// If *buffer points to a non-NULL pointer (an already allocated
// buffer), PyArg_ParseTuple() will use this location as the buffer
// and interpret the initial value of *buffer_length as the buffer
// size. It will then copy the encoded data into the buffer and
// NUL-terminate it. If the buffer is not large enough, a ValueError
// will be set.
//
// In both cases, *buffer_length is set to the length of the encoded
// data without the trailing NUL byte.
//
// et# (str, bytes or bytearray) [const char *encoding, char **buffer,
// int *buffer_length]
//
// Same as es# except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// PyObject*) into which the object pointer is stored. If the Python
// object does not have the required type, TypeError is raised.
//
// O& (object) [converter, anything]
//
// Convert a Python object to a C variable through a converter
// function. This takes two arguments: the first is a function, the
// second is the address of a C variable (of arbitrary type),
// converted to void *. The converter function in turn is called as
// follows:
//
// status = converter(object, address);
//
// where object is the Python object to be converted and address is
// the void* argument that was passed to the PyArg_Parse*()
// function. The returned status should be 1 for a successful
// conversion and 0 if the conversion has failed. When the conversion
// fails, the converter function should raise an exception and leave
// the content of address unmodified.
//
// If the converter returns Py_CLEANUP_SUPPORTED, it may get called a
// second time if the argument parsing eventually fails, giving the
// converter a chance to release any memory that it had already
// allocated. In this second call, the object parameter will be NULL;
// address will have the same value as in the original call.
//
// Changed in version 3.1: Py_CLEANUP_SUPPORTED was added.
//
// p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly

// ParseTupleAndKeywords parses positional arguments (args) and keyword
// arguments (kwargs) against a PyArg_ParseTuple-style format string and
// stores each parsed value through the matching results pointer.
//
// kwlist gives the keyword name for each result slot; when non-nil it
// must be exactly as long as results. Slots the caller does not supply
// are left untouched, so results should be pre-loaded with defaults.
// All failures are reported as TypeError via ExceptionNewf.
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
// Parse the format into one formatOp per expected argument, reusing a
// small stack buffer so short formats avoid a heap allocation.
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
// A slot may be supplied by keyword if it has a name in kwlist.
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := results[i]
// Type-check arg against its format unit, then store it.
switch op.code {
case 'O':
// 'O': any object, stored unchanged.
*result = arg
case 'Z':
// 'Z' / 'Z#': str or None.
switch op.modifier {
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
case '#', 0:
switch arg := arg.(type) {
case String, NoneType:
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'z':
// 'z': str or None; 'z#'/'z*' additionally accept bytes.
switch op.modifier {
default:
switch arg := arg.(type) {
case String, NoneType:
// ok
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes, NoneType:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'U':
// 'U': must already be a str; no conversion attempted.
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 's':
// 's': str; 's#'/'s*' also accept bytes.
switch op.modifier {
default:
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'y':
// 'y' (and 'y#'/'y*'): bytes only, never str.
switch op.modifier {
default:
if _, ok := arg.(Bytes); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'i', 'n':
// 'i'/'n': int required; stored as-is.
if _, ok := arg.(Int); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'p':
// 'p': requires a Bool here. NOTE(review): CPython's 'p' applies
// truth testing to any value; this stricter behavior differs --
// confirm before relying on it.
if _, ok := arg.(Bool); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'd':
// 'd': float, with int accepted and converted to Float.
switch x := arg.(type) {
case Int:
*result = Float(x)
case Float:
*result = x
default:
return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
}
default:
return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
}
}
return nil
}
// ParseTuple parses positional-only arguments.
//
// It delegates to ParseTupleAndKeywords with no keyword arguments and
// no keyword list, so the format string alone drives the parse.
func ParseTuple(args Tuple, format string, results ...*Object) error {
	return ParseTupleAndKeywords(args, nil, format, nil, results...)
}
// formatOp is a single parsed format unit: the format character plus an
// optional '*' or '#' modifier.
type formatOp struct {
	code     byte
	modifier byte
}

// parseFormat splits a PyArg_ParseTuple-style format string into format
// units, reusing the supplied slice for storage where possible.
//
// It returns the number of required arguments (everything before '|'),
// the function name used in error messages (the text after ':' or ';',
// defaulting to "function"), the index of the first keyword-only unit
// (everything after '$'; 0xFFFF when '$' is absent), and the parsed
// units themselves.
func parseFormat(format string, in []formatOp) (min int, name string, kwOnly_i int, ops []formatOp) {
	name = "function"
	min = -1
	kwOnly_i = 0xFFFF
	ops = in[:0]
	pos := 0
	for pos < len(format) {
		unit := formatOp{code: format[pos]}
		pos++
		// An immediately following '*' or '#' modifies this unit.
		if pos < len(format) && (format[pos] == '*' || format[pos] == '#') {
			unit.modifier = format[pos]
			pos++
		}
		switch unit.code {
		case ':', ';':
			// The rest of the string is the function name; it
			// terminates the list of format units.
			name = format[pos:]
			pos = len(format)
		case '$':
			// Units from here on are keyword-only.
			kwOnly_i = len(ops)
		case '|':
			// Units from here on are optional.
			min = len(ops)
		default:
			ops = append(ops, unit)
		}
	}
	// Without '|' every unit is required.
	if min < 0 {
		min = len(ops)
	}
	return min, name, kwOnly_i, ops
}
// checkNumberOfArgs validates that nargs supplied arguments fall within
// the arity range [min, max] and that enough result pointers were
// provided by the Go caller.
//
// Caller mistakes produce a TypeError ("takes exactly/at most/at least
// N arguments"); too few result pointers is reported as an internal
// error.
func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {
	switch {
	case min == max && nargs != max:
		return ExceptionNewf(TypeError, "%s() takes exactly %d arguments (%d given)", name, max, nargs)
	case min != max && nargs > max:
		return ExceptionNewf(TypeError, "%s() takes at most %d arguments (%d given)", name, max, nargs)
	case min != max && nargs < min:
		return ExceptionNewf(TypeError, "%s() takes at least %d arguments (%d given)", name, min, nargs)
	}
	if nargs > nresults {
		return ExceptionNewf(TypeError, "Internal error: not enough arguments supplied to Unpack*/Parse*")
	}
	return nil
}
// Unpack the args tuple into the results
//
// Up to the caller to set default values
func UnpackTuple(args Tuple, kwargs StringDict, name string, min int, max int, results ...*Object) error {
if len(kwargs) != 0 {
return ExceptionNewf(TypeError, "%s() does not take keyword arguments", name)
}
// Check number of arguments
err := checkNumberOfArgs(name, len(args), len(results), min, max)
if err != nil {
return err
}
// Copy the results in
for i := range args {
*results[i] = args[i]
}
return nil
} | random_line_split | |
args.go | // Copyright 2018 The go-python Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Argument parsing for Go functions called by python
//
// These functions are useful when creating your own extensions
// functions and methods. Additional information and examples are
// available in Extending and Embedding the Python Interpreter.
//
// The first three of these functions described, PyArg_ParseTuple(),
// PyArg_ParseTupleAndKeywords(), and PyArg_Parse(), all use format
// strings which are used to tell the function about the expected
// arguments. The format strings use the same syntax for each of these
// functions.
//
// Parsing arguments
//
// A format string consists of zero or more “format units.” A format
// unit describes one Python object; it is usually a single character
// or a parenthesized sequence of format units. With a few exceptions,
// a format unit that is not a parenthesized sequence normally
// corresponds to a single address argument to these functions. In the
// following description, the quoted form is the format unit; the
// entry in (round) parentheses is the Python object type that matches
// the format unit; and the entry in [square] brackets is the type of
// the C variable(s) whose address should be passed.
//
// s (str) [const char *]
//
// Convert a Unicode object to a C pointer to a character string. A
// pointer to an existing string is stored in the character pointer
// variable whose address you pass. The C string is
// NUL-terminated. The Python string must not contain embedded NUL
// bytes; if it does, a TypeError exception is raised. Unicode objects
// are converted to C strings using 'utf-8' encoding. If this
// conversion fails, a UnicodeError is raised.
//
// Note This format does not accept bytes-like objects. If you want to
// accept filesystem paths and convert them to C character strings, it
// is preferable to use the O& format with PyUnicode_FSConverter() as
// converter.
//
// s* (str, bytes, bytearray or buffer compatible object) [Py_buffer]
//
// This format accepts Unicode objects as well as bytes-like
// objects. It fills a Py_buffer structure provided by the caller. In
// this case the resulting C string may contain embedded NUL
// bytes. Unicode objects are converted to C strings using 'utf-8'
// encoding.
//
// s# (str, bytes or read-only buffer compatible object) [const char *, int or Py_ssize_t]
//
// Like s*, except that it doesn’t accept mutable buffer-like objects
// such as bytearray. The result is stored into two C variables, the
// first one a pointer to a C string, the second one its length. The
// string may contain embedded null bytes. Unicode objects are
// converted to C strings using 'utf-8' encoding.
//
// z (str or None) [const char *]
//
// Like s, but the Python object may also be None, in which case the C
// pointer is set to NULL.
//
// z* (str, bytes, bytearray, buffer compatible object or None)
// [Py_buffer]
//
// Like s*, but the Python object may also be None, in which case the
// buf member of the Py_buffer structure is set to NULL.
//
// z# (str, bytes, read-only buffer compatible object or None) [const
// char *, int]
//
// Like s#, but the Python object may also be None, in which case the
// C pointer is set to NULL.
//
// y (bytes) [const char *]
//
// This format converts a bytes-like object to a C pointer to a
// character string; it does not accept Unicode objects. The bytes
// buffer must not contain embedded NUL bytes; if it does, a TypeError
// exception is raised.
//
// y* (bytes, bytearray or bytes-like object) [Py_buffer]
//
// This variant on s* doesn’t accept Unicode objects, only bytes-like
// objects. This is the recommended way to accept binary data.
//
// y# (bytes) [const char *, int]
//
// This variant on s# doesn’t accept Unicode objects, only bytes-like
// objects.
//
// S (bytes) [PyBytesObject *]
//
// Requires that the Python object is a bytes object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytes object. The C variable may also be declared as PyObject*.
//
// Y (bytearray) [PyByteArrayObject *]
//
// Requires that the Python object is a bytearray object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytearray object. The C variable may also be declared as PyObject*.
//
// u (str) [Py_UNICODE *]
//
// Convert a Python Unicode object to a C pointer to a NUL-terminated
// buffer of Unicode characters. You must pass the address of a
// Py_UNICODE pointer variable, which will be filled with the pointer
// to an existing Unicode buffer. Please note that the width of a
// Py_UNICODE character depends on compilation options (it is either
// 16 or 32 bits). The Python string must not contain embedded NUL
// characters; if it does, a TypeError exception is raised.
//
// Note Since u doesn’t give you back the length of the string, and it
// may contain embedded NUL characters, it is recommended to use u# or
// U instead.
//
// u# (str) [Py_UNICODE *, int]
//
// This variant on u stores into two C variables, the first one a
// pointer to a Unicode data buffer, the second one its length.
//
// Z (str or None) [Py_UNICODE *]
//
// Like u, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// Z# (str or None) [Py_UNICODE *, int]
//
// Like u#, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// U (str) [PyObject *]
//
// Requires that the Python object is a Unicode object, without
// attempting any conversion. Raises TypeError if the object is not a
// Unicode object. The C variable may also be declared as PyObject*.
//
// w* (bytearray or read-write byte-oriented buffer) [Py_buffer]
//
// This format accepts any object which implements the read-write
// buffer interface. It fills a Py_buffer structure provided by the
// caller. The buffer may contain embedded null bytes. The caller have
// to call PyBuffer_Release() when it is done with the buffer.
//
// es (str) [const char *encoding, char **buffer]
//
// This variant on s is used for encoding Unicode into a character
// buffer. It only works for encoded data without embedded NUL bytes.
//
// This format requires two arguments. The first is only used as
// input, and must be a const char* which points to the name of an
// encoding as a NUL-terminated string, or NULL, in which case 'utf-8'
// encoding is used. An exception is raised if the named encoding is
// not known to Python. The second argument must be a char**; the
// value of the pointer it references will be set to a buffer with the
// contents of the argument text. The text will be encoded in the
// encoding specified by the first argument.
//
// PyArg_ParseTuple() will allocate a buffer of the needed size, copy
// the encoded data into this buffer and adjust *buffer to reference
// the newly allocated storage. The caller is responsible for calling
// PyMem_Free() to free the allocated buffer after use.
//
// et (str, bytes or bytearray) [const char *encoding, char **buffer]
//
// Same as es except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// es# (str) [const char *encoding, char **buffer, int *buffer_length]
//
// This variant on s# is used for encoding Unicode into a character
// buffer. Unlike the es format, this variant allows input data which
// contains NUL characters.
//
// It requires three arguments. The first is only used as input, and
// must be a const char* which points to the name of an encoding as a
// NUL-terminated string, or NULL, in which case 'utf-8' encoding is
// used. An exception is raised if the named encoding is not known to
// Python. The second argument must be a char**; the value of the
// pointer it references will be set to a buffer with the contents of
// the argument text. The text will be encoded in the encoding
// specified by the first argument. The third argument must be a
// pointer to an integer; the referenced integer will be set to the
// number of bytes in the output buffer.
//
// There are two modes of operation:
//
// If *buffer points a NULL pointer, the function will allocate a
// buffer of the needed size, copy the encoded data into this buffer
// and set *buffer to reference the newly allocated storage. The
// caller is responsible for calling PyMem_Free() to free the
// allocated buffer after usage.
//
// If *buffer points to a non-NULL pointer (an already allocated
// buffer), PyArg_ParseTuple() will use this location as the buffer
// and interpret the initial value of *buffer_length as the buffer
// size. It will then copy the encoded data into the buffer and
// NUL-terminate it. If the buffer is not large enough, a ValueError
// will be set.
//
// In both cases, *buffer_length is set to the length of the encoded
// data without the trailing NUL byte.
//
// et# (str, bytes or bytearray) [const char *encoding, char **buffer,
// int *buffer_length]
//
// Same as es# except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// PyObject*) into which the object pointer is stored. If the Python
// object does not have the required type, TypeError is raised.
//
// O& (object) [converter, anything]
//
// Convert a Python object to a C variable through a converter
// function. This takes two arguments: the first is a function, the
// second is the address of a C variable (of arbitrary type),
// converted to void *. The converter function in turn is called as
// follows:
//
// status = converter(object, address);
//
// where object is the Python object to be converted and address is
// the void* argument that was passed to the PyArg_Parse*()
// function. The returned status should be 1 for a successful
// conversion and 0 if the conversion has failed. When the conversion
// fails, the converter function should raise an exception and leave
// the content of address unmodified.
//
// If the converter returns Py_CLEANUP_SUPPORTED, it may get called a
// second time if the argument parsing eventually fails, giving the
// converter a chance to release any memory that it had already
// allocated. In this second call, the object parameter will be NULL;
// address will have the same value as in the original call.
//
// Changed in version 3.1: Py_CLEANUP_SUPPORTED was added.
//
// p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := | tch op.code {
case 'O':
*result = arg
case 'Z':
switch op.modifier {
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
case '#', 0:
switch arg := arg.(type) {
case String, NoneType:
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'z':
switch op.modifier {
default:
switch arg := arg.(type) {
case String, NoneType:
// ok
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes, NoneType:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'U':
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 's':
switch op.modifier {
default:
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'y':
switch op.modifier {
default:
if _, ok := arg.(Bytes); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'i', 'n':
if _, ok := arg.(Int); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'p':
if _, ok := arg.(Bool); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'd':
switch x := arg.(type) {
case Int:
*result = Float(x)
case Float:
*result = x
default:
return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
}
default:
return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
}
}
return nil
}
// Parse tuple only
func ParseTuple(args Tuple, format string, results ...*Object) error {
return ParseTupleAndKeywords(args, nil, format, nil, results...)
}
type formatOp struct {
code byte
modifier byte
}
// Parse the format
func parseFormat(format string, in []formatOp) (min int, name string, kwOnly_i int, ops []formatOp) {
name = "function"
min = -1
kwOnly_i = 0xFFFF
ops = in[:0]
N := len(format)
for i := 0; i < N; {
op := formatOp{code: format[i]}
i++
if i < N {
if mod := format[i]; mod == '*' || mod == '#' {
op.modifier = mod
i++
}
}
switch op.code {
case ':', ';':
name = format[i:]
i = N
case '$':
kwOnly_i = len(ops)
case '|':
min = len(ops)
default:
ops = append(ops, op)
}
}
if min < 0 {
min = len(ops)
}
return
}
// Checks the number of args passed in
func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {
if min == max {
if nargs != max {
return ExceptionNewf(TypeError, "%s() takes exactly %d arguments (%d given)", name, max, nargs)
}
} else {
if nargs > max {
return ExceptionNewf(TypeError, "%s() takes at most %d arguments (%d given)", name, max, nargs)
}
if nargs < min {
return ExceptionNewf(TypeError, "%s() takes at least %d arguments (%d given)", name, min, nargs)
}
}
if nargs > nresults {
return ExceptionNewf(TypeError, "Internal error: not enough arguments supplied to Unpack*/Parse*")
}
return nil
}
// Unpack the args tuple into the results
//
// Up to the caller to set default values
func UnpackTuple(args Tuple, kwargs StringDict, name string, min int, max int, results ...*Object) error {
if len(kwargs) != 0 {
return ExceptionNewf(TypeError, "%s() does not take keyword arguments", name)
}
// Check number of arguments
err := checkNumberOfArgs(name, len(args), len(results), min, max)
if err != nil {
return err
}
// Copy the results in
for i := range args {
*results[i] = args[i]
}
return nil
}
| results[i]
swi | conditional_block |
args.go | // Copyright 2018 The go-python Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Argument parsing for Go functions called by python
//
// These functions are useful when creating your own extensions
// functions and methods. Additional information and examples are
// available in Extending and Embedding the Python Interpreter.
//
// The first three of these functions described, PyArg_ParseTuple(),
// PyArg_ParseTupleAndKeywords(), and PyArg_Parse(), all use format
// strings which are used to tell the function about the expected
// arguments. The format strings use the same syntax for each of these
// functions.
//
// Parsing arguments
//
// A format string consists of zero or more “format units.” A format
// unit describes one Python object; it is usually a single character
// or a parenthesized sequence of format units. With a few exceptions,
// a format unit that is not a parenthesized sequence normally
// corresponds to a single address argument to these functions. In the
// following description, the quoted form is the format unit; the
// entry in (round) parentheses is the Python object type that matches
// the format unit; and the entry in [square] brackets is the type of
// the C variable(s) whose address should be passed.
//
// s (str) [const char *]
//
// Convert a Unicode object to a C pointer to a character string. A
// pointer to an existing string is stored in the character pointer
// variable whose address you pass. The C string is
// NUL-terminated. The Python string must not contain embedded NUL
// bytes; if it does, a TypeError exception is raised. Unicode objects
// are converted to C strings using 'utf-8' encoding. If this
// conversion fails, a UnicodeError is raised.
//
// Note This format does not accept bytes-like objects. If you want to
// accept filesystem paths and convert them to C character strings, it
// is preferable to use the O& format with PyUnicode_FSConverter() as
// converter.
//
// s* (str, bytes, bytearray or buffer compatible object) [Py_buffer]
//
// This format accepts Unicode objects as well as bytes-like
// objects. It fills a Py_buffer structure provided by the caller. In
// this case the resulting C string may contain embedded NUL
// bytes. Unicode objects are converted to C strings using 'utf-8'
// encoding.
//
// s# (str, bytes or read-only buffer compatible object) [const char *, int or Py_ssize_t]
//
// Like s*, except that it doesn’t accept mutable buffer-like objects
// such as bytearray. The result is stored into two C variables, the
// first one a pointer to a C string, the second one its length. The
// string may contain embedded null bytes. Unicode objects are
// converted to C strings using 'utf-8' encoding.
//
// z (str or None) [const char *]
//
// Like s, but the Python object may also be None, in which case the C
// pointer is set to NULL.
//
// z* (str, bytes, bytearray, buffer compatible object or None)
// [Py_buffer]
//
// Like s*, but the Python object may also be None, in which case the
// buf member of the Py_buffer structure is set to NULL.
//
// z# (str, bytes, read-only buffer compatible object or None) [const
// char *, int]
//
// Like s#, but the Python object may also be None, in which case the
// C pointer is set to NULL.
//
// y (bytes) [const char *]
//
// This format converts a bytes-like object to a C pointer to a
// character string; it does not accept Unicode objects. The bytes
// buffer must not contain embedded NUL bytes; if it does, a TypeError
// exception is raised.
//
// y* (bytes, bytearray or bytes-like object) [Py_buffer]
//
// This variant on s* doesn’t accept Unicode objects, only bytes-like
// objects. This is the recommended way to accept binary data.
//
// y# (bytes) [const char *, int]
//
// This variant on s# doesn’t accept Unicode objects, only bytes-like
// objects.
//
// S (bytes) [PyBytesObject *]
//
// Requires that the Python object is a bytes object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytes object. The C variable may also be declared as PyObject*.
//
// Y (bytearray) [PyByteArrayObject *]
//
// Requires that the Python object is a bytearray object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytearray object. The C variable may also be declared as PyObject*.
//
// u (str) [Py_UNICODE *]
//
// Convert a Python Unicode object to a C pointer to a NUL-terminated
// buffer of Unicode characters. You must pass the address of a
// Py_UNICODE pointer variable, which will be filled with the pointer
// to an existing Unicode buffer. Please note that the width of a
// Py_UNICODE character depends on compilation options (it is either
// 16 or 32 bits). The Python string must not contain embedded NUL
// characters; if it does, a TypeError exception is raised.
//
// Note Since u doesn’t give you back the length of the string, and it
// may contain embedded NUL characters, it is recommended to use u# or
// U instead.
//
// u# (str) [Py_UNICODE *, int]
//
// This variant on u stores into two C variables, the first one a
// pointer to a Unicode data buffer, the second one its length.
//
// Z (str or None) [Py_UNICODE *]
//
// Like u, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// Z# (str or None) [Py_UNICODE *, int]
//
// Like u#, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// U (str) [PyObject *]
//
// Requires that the Python object is a Unicode object, without
// attempting any conversion. Raises TypeError if the object is not a
// Unicode object. The C variable may also be declared as PyObject*.
//
// w* (bytearray or read-write byte-oriented buffer) [Py_buffer]
//
// This format accepts any object which implements the read-write
// buffer interface. It fills a Py_buffer structure provided by the
// caller. The buffer may contain embedded null bytes. The caller have
// to call PyBuffer_Release() when it is done with the buffer.
//
// es (str) [const char *encoding, char **buffer]
//
// This variant on s is used for encoding Unicode into a character
// buffer. It only works for encoded data without embedded NUL bytes.
//
// This format requires two arguments. The first is only used as
// input, and must be a const char* which points to the name of an
// encoding as a NUL-terminated string, or NULL, in which case 'utf-8'
// encoding is used. An exception is raised if the named encoding is
// not known to Python. The second argument must be a char**; the
// value of the pointer it references will be set to a buffer with the
// contents of the argument text. The text will be encoded in the
// encoding specified by the first argument.
//
// PyArg_ParseTuple() will allocate a buffer of the needed size, copy
// the encoded data into this buffer and adjust *buffer to reference
// the newly allocated storage. The caller is responsible for calling
// PyMem_Free() to free the allocated buffer after use.
//
// et (str, bytes or bytearray) [const char *encoding, char **buffer]
//
// Same as es except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// es# (str) [const char *encoding, char **buffer, int *buffer_length]
//
// This variant on s# is used for encoding Unicode into a character
// buffer. Unlike the es format, this variant allows input data which
// contains NUL characters.
//
// It requires three arguments. The first is only used as input, and
// must be a const char* which points to the name of an encoding as a
// NUL-terminated string, or NULL, in which case 'utf-8' encoding is
// used. An exception is raised if the named encoding is not known to
// Python. The second argument must be a char**; the value of the
// pointer it references will be set to a buffer with the contents of
// the argument text. The text will be encoded in the encoding
// specified by the first argument. The third argument must be a
// pointer to an integer; the referenced integer will be set to the
// number of bytes in the output buffer.
//
// There are two modes of operation:
//
// If *buffer points a NULL pointer, the function will allocate a
// buffer of the needed size, copy the encoded data into this buffer
// and set *buffer to reference the newly allocated storage. The
// caller is responsible for calling PyMem_Free() to free the
// allocated buffer after usage.
//
// If *buffer points to a non-NULL pointer (an already allocated
// buffer), PyArg_ParseTuple() will use this location as the buffer
// and interpret the initial value of *buffer_length as the buffer
// size. It will then copy the encoded data into the buffer and
// NUL-terminate it. If the buffer is not large enough, a ValueError
// will be set.
//
// In both cases, *buffer_length is set to the length of the encoded
// data without the trailing NUL byte.
//
// et# (str, bytes or bytearray) [const char *encoding, char **buffer,
// int *buffer_length]
//
// Same as es# except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// PyObject*) into which the object pointer is stored. If the Python
// object does not have the required type, TypeError is raised.
//
// O& (object) [converter, anything]
//
// Convert a Python object to a C variable through a converter
// function. This takes two arguments: the first is a function, the
// second is the address of a C variable (of arbitrary type),
// converted to void *. The converter function in turn is called as
// follows:
//
// status = converter(object, address);
//
// where object is the Python object to be converted and address is
// the void* argument that was passed to the PyArg_Parse*()
// function. The returned status should be 1 for a successful
// conversion and 0 if the conversion has failed. When the conversion
// fails, the converter function should raise an exception and leave
// the content of address unmodified.
//
// If the converter returns Py_CLEANUP_SUPPORTED, it may get called a
// second time if the argument parsing eventually fails, giving the
// converter a chance to release any memory that it had already
// allocated. In this second call, the object parameter will be NULL;
// address will have the same value as in the original call.
//
// Changed in version 3.1: Py_CLEANUP_SUPPORTED was added.
//
// p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := results[i]
switch op.code {
case 'O':
*result = arg
case 'Z':
switch op.modifier {
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
case '#', 0:
switch arg := arg.(type) {
case String, NoneType:
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'z':
switch op.modifier {
default:
switch arg := arg.(type) {
case String, NoneType:
// ok
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes, NoneType:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'U':
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 's':
switch op.modifier {
default:
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'y':
switch op.modifier {
default:
if _, ok := arg.(Bytes); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'i', 'n':
if _, ok := arg.(Int); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'p':
if _, ok := arg.(Bool); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'd':
switch x := arg.(type) {
case Int:
*result = Float(x)
case Float:
*result = x
default:
return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
}
default:
return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
}
}
return nil
}
// Parse tuple only
func ParseTuple(args Tuple, format string, results ...*Object) error {
return ParseTupleAndKeywords(args, nil, format, nil, results...)
}
type formatOp struct {
code byte
modifier byte
}
// Parse the format
func parseFormat(format string, in []formatOp) (min int, name string, kwOnly_i int, ops []formatOp) {
name = "function"
min = -1 | passed in
func checkNumberOfArgs(name string, nargs, nresults, min, max int) error {
if min == max {
if nargs != max {
return ExceptionNewf(TypeError, "%s() takes exactly %d arguments (%d given)", name, max, nargs)
}
} else {
if nargs > max {
return ExceptionNewf(TypeError, "%s() takes at most %d arguments (%d given)", name, max, nargs)
}
if nargs < min {
return ExceptionNewf(TypeError, "%s() takes at least %d arguments (%d given)", name, min, nargs)
}
}
if nargs > nresults {
return ExceptionNewf(TypeError, "Internal error: not enough arguments supplied to Unpack*/Parse*")
}
return nil
}
// Unpack the args tuple into the results
//
// Up to the caller to set default values
func UnpackTuple(args Tuple, kwargs StringDict, name string, min int, max int, results ...*Object) error {
if len(kwargs) != 0 {
return ExceptionNewf(TypeError, "%s() does not take keyword arguments", name)
}
// Check number of arguments
err := checkNumberOfArgs(name, len(args), len(results), min, max)
if err != nil {
return err
}
// Copy the results in
for i := range args {
*results[i] = args[i]
}
return nil
}
|
kwOnly_i = 0xFFFF
ops = in[:0]
N := len(format)
for i := 0; i < N; {
op := formatOp{code: format[i]}
i++
if i < N {
if mod := format[i]; mod == '*' || mod == '#' {
op.modifier = mod
i++
}
}
switch op.code {
case ':', ';':
name = format[i:]
i = N
case '$':
kwOnly_i = len(ops)
case '|':
min = len(ops)
default:
ops = append(ops, op)
}
}
if min < 0 {
min = len(ops)
}
return
}
// Checks the number of args | identifier_body |
sudo.py | #Video sequence is just a collection of frames or collection of images that runs with respect to time.
#Make code stare at background without hand
#Bring hand in foreground with background
#Apply background-subtraction
#Thresholding is the assigment of pixel intensities to 0’s and 1’s based a particular threshold level so that our object of interest alone is captured from an image.
#Contour is the outline or boundary of an object located in an image.
'''
ToDo's:
Background Subtraction
Motion Detection and Thresholding
Contour Extraction
'''
import cv2
import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initi | eight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
#print(thresholded)
#print(segmented)
#draw segmented region and display the frames
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255)) #(destination_img, contours to draw, contourIdx(-1 denotes all contours are drawn), color)
#count no of count_fingers
fingers = count_fingers(thresholded, segmented)
cv2.putText(clone, "Detected Value: "+str(fingers), (70,45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0 , 255), 2)
#display thresholded image
cv2.imshow("Thresholded", thresholded)
captureScreen(fingers)
#draw segmented hand
cv2.rectangle(clone, (left, top), (right,bottom), (0, 255, 0), 2)
#increment frame counter
num_frames +=1
#display frame with segmented hand
cv2.imshow("Output", clone)
#terminate condition
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
compute() | alize w | identifier_name |
sudo.py | #Video sequence is just a collection of frames or collection of images that runs with respect to time.
#Make code stare at background without hand
#Bring hand in foreground with background
#Apply background-subtraction
#Thresholding is the assigment of pixel intensities to 0’s and 1’s based a particular threshold level so that our object of interest alone is captured from an image.
#Contour is the outline or boundary of an object located in an image.
'''
ToDo's:
Background Subtraction
Motion Detection and Thresholding
Contour Extraction
'''
import cv2
import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull | #print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
#print(thresholded)
#print(segmented)
#draw segmented region and display the frames
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255)) #(destination_img, contours to draw, contourIdx(-1 denotes all contours are drawn), color)
#count no of count_fingers
fingers = count_fingers(thresholded, segmented)
cv2.putText(clone, "Detected Value: "+str(fingers), (70,45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0 , 255), 2)
#display thresholded image
cv2.imshow("Thresholded", thresholded)
captureScreen(fingers)
#draw segmented hand
cv2.rectangle(clone, (left, top), (right,bottom), (0, 255, 0), 2)
#increment frame counter
num_frames +=1
#display frame with segmented hand
cv2.imshow("Output", clone)
#terminate condition
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
compute() | extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0]) | random_line_split |
sudo.py | #Video sequence is just a collection of frames or collection of images that runs with respect to time.
#Make code stare at background without hand
#Bring hand in foreground with background
#Apply background-subtraction
#Thresholding is the assigment of pixel intensities to 0’s and 1’s based a particular threshold level so that our object of interest alone is captured from an image.
#Contour is the outline or boundary of an object located in an image.
'''
ToDo's:
Background Subtraction
Motion Detection and Thresholding
Contour Extraction
'''
import cv2
import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if | in__":
compute() | we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
#print(thresholded)
#print(segmented)
#draw segmented region and display the frames
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255)) #(destination_img, contours to draw, contourIdx(-1 denotes all contours are drawn), color)
#count no of count_fingers
fingers = count_fingers(thresholded, segmented)
cv2.putText(clone, "Detected Value: "+str(fingers), (70,45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0 , 255), 2)
#display thresholded image
cv2.imshow("Thresholded", thresholded)
captureScreen(fingers)
#draw segmented hand
cv2.rectangle(clone, (left, top), (right,bottom), (0, 255, 0), 2)
#increment frame counter
num_frames +=1
#display frame with segmented hand
cv2.imshow("Output", clone)
#terminate condition
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q"):
break
camera.release()
cv2.destroyAllWindows()
if __name__ == "__ma | identifier_body |
sudo.py | #Video sequence is just a collection of frames or collection of images that runs with respect to time.
#Make code stare at background without hand
#Bring hand in foreground with background
#Apply background-subtraction
#Thresholding is the assigment of pixel intensities to 0’s and 1’s based a particular threshold level so that our object of interest alone is captured from an image.
#Contour is the outline or boundary of an object located in an image.
'''
ToDo's:
Background Subtraction
Motion Detection and Thresholding
Contour Extraction
'''
import cv2
import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.re | se()
cv2.destroyAllWindows()
if __name__ == "__main__":
compute() | ad()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
#print(thresholded)
#print(segmented)
#draw segmented region and display the frames
cv2.drawContours(clone, [segmented + (right, top)], -1, (0, 0, 255)) #(destination_img, contours to draw, contourIdx(-1 denotes all contours are drawn), color)
#count no of count_fingers
fingers = count_fingers(thresholded, segmented)
cv2.putText(clone, "Detected Value: "+str(fingers), (70,45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0 , 255), 2)
#display thresholded image
cv2.imshow("Thresholded", thresholded)
captureScreen(fingers)
#draw segmented hand
cv2.rectangle(clone, (left, top), (right,bottom), (0, 255, 0), 2)
#increment frame counter
num_frames +=1
#display frame with segmented hand
cv2.imshow("Output", clone)
#terminate condition
keypress = cv2.waitKey(1) & 0xFF
if keypress == ord("q"):
break
camera.relea | conditional_block |
cloudFoundryDeploy.go | package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/SAP/jenkins-library/pkg/cloudfoundry"
"github.com/SAP/jenkins-library/pkg/command"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/yaml"
"github.com/elliotchance/orderedmap"
"github.com/pkg/errors"
)
// cfFileUtil abstracts all file-system operations used by this step so that
// tests can substitute a mock (see the package-level fileUtils variable).
type cfFileUtil interface {
	FileExists(string) (bool, error)
	FileRename(string, string) error
	FileRead(string) ([]byte, error)
	FileWrite(path string, content []byte, perm os.FileMode) error
	Getwd() (string, error)
	Glob(string) ([]string, error)
	Chmod(string, os.FileMode) error
	Copy(string, string) (int64, error)
	Stat(path string) (os.FileInfo, error)
}
// Indirections over package-level functions; unit tests swap these for mocks.
var _now = time.Now
var _cfLogin = cfLogin
var _cfLogout = cfLogout
var _getManifest = getManifest
var _replaceVariables = yaml.Substitute
var _getVarsOptions = cloudfoundry.GetVarsOptions
var _getVarsFileOptions = cloudfoundry.GetVarsFileOptions
var _environ = os.Environ

// fileUtils is the file-system access used throughout this step; replaced in tests.
var fileUtils cfFileUtil = piperutils.Files{}
// for simplify mocking. Maybe we find a more elegant way (mock for CFUtils)
// cfLogin performs a Cloud Foundry login through CFUtils. It is a free
// function so tests can stub it via the _cfLogin indirection.
func cfLogin(c command.ExecRunner, options cloudfoundry.LoginOptions) error {
	utils := &cloudfoundry.CFUtils{Exec: c}
	return utils.Login(options)
}
// for simplify mocking. Maybe we find a more elegant way (mock for CFUtils)
// cfLogout performs a Cloud Foundry logout through CFUtils. It is a free
// function so tests can stub it via the _cfLogout indirection.
func cfLogout(c command.ExecRunner) error {
	utils := &cloudfoundry.CFUtils{Exec: c}
	return utils.Logout()
}
// defaultSmokeTestScript is written to disk when the configured smoke-test
// script is named "blueGreenCheckScript.sh"; it probes the application root
// and greps for $STATUS_CODE.
const defaultSmokeTestScript = `#!/usr/bin/env bash
# this is simply testing if the application root returns HTTP STATUS_CODE
curl -so /dev/null -w '%{response_code}' https://$1 | grep $STATUS_CODE`
// cloudFoundryDeploy is the entry point of the step. It wires up a command
// runner whose output is routed to the logging framework and terminates the
// step on any error.
func cloudFoundryDeploy(config cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux) {
	execRunner := command.Command{}
	// route all command output through the logging framework
	execRunner.Stdout(log.Writer())
	execRunner.Stderr(log.Writer())

	// Fatalf leads to an os.Exit(1) in the end, stopping the step on error.
	if err := runCloudFoundryDeploy(&config, telemetryData, influxData, &execRunner); err != nil {
		log.Entry().WithError(err).Fatalf("step execution failed: %s", err)
	}
}
// runCloudFoundryDeploy validates the configuration, dispatches to the
// MTA-plugin or cf-native deployment, and records influx metadata whenever a
// deployment was actually attempted.
func runCloudFoundryDeploy(config *cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux, command command.ExecRunner) error {
	log.Entry().Infof("General parameters: deployTool='%s', deployType='%s', cfApiEndpoint='%s', cfOrg='%s', cfSpace='%s'",
		config.DeployTool, config.DeployType, config.APIEndpoint, config.Org, config.Space)

	if err := validateAppName(config.AppName); err != nil {
		return err
	}
	validateDeployTool(config)

	var err error
	deployTriggered := true
	switch config.DeployTool {
	case "mtaDeployPlugin":
		err = handleMTADeployment(config, command)
	case "cf_native":
		err = handleCFNativeDeployment(config, command)
	default:
		deployTriggered = false
		log.Entry().Warningf("Found unsupported deployTool ('%s'). Skipping deployment. Supported deploy tools: 'mtaDeployPlugin', 'cf_native'", config.DeployTool)
	}

	if deployTriggered {
		prepareInflux(err == nil, config, influxData)
	}
	return err
}
// validateDeployTool derives deployTool from buildTool when deployTool has not
// been configured explicitly. "mta" maps to the MTA plugin; everything else
// falls back to cf_native.
func validateDeployTool(config *cloudFoundryDeployOptions) {
	// nothing to derive: tool already set, or no build tool to derive from
	if config.DeployTool != "" || config.BuildTool == "" {
		return
	}
	if config.BuildTool == "mta" {
		config.DeployTool = "mtaDeployPlugin"
	} else {
		config.DeployTool = "cf_native"
	}
	log.Entry().Infof("Parameter deployTool not specified - deriving from buildTool '%s': '%s'",
		config.BuildTool, config.DeployTool)
}
// validateAppName checks the configured application name against Cloud Foundry
// naming rules. The empty string is considered valid (no app name configured).
// Underscores and leading/trailing dashes are hard errors; any other
// non-alphanumeric character only produces a warning.
func validateAppName(appName string) error {
	// for the sake of brevity we consider the empty string as valid app name here
	isValidAppName, err := regexp.MatchString("^$|^[a-zA-Z0-9]$|^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$", appName)
	if err != nil {
		return err
	}
	if isValidAppName {
		return nil
	}
	const (
		underscore = "_"
		dash       = "-"
		docuLink   = "https://docs.cloudfoundry.org/devguide/deploy-apps/deploy-app.html#basic-settings"
	)
	log.Entry().Warningf("Your application name '%s' contains non-alphanumeric characters which may lead to errors in the future, "+
		"as they are not supported by CloudFoundry. For more details please visit %s", appName, docuLink)
	var fail bool
	message := []string{fmt.Sprintf("Your application name '%s'", appName)}
	if strings.Contains(appName, underscore) {
		message = append(message, fmt.Sprintf("contains a '%s' (underscore) which is not allowed, only letters, dashes and numbers can be used.", underscore))
		fail = true
	}
	if strings.HasPrefix(appName, dash) || strings.HasSuffix(appName, dash) {
		message = append(message, fmt.Sprintf("starts or ends with a '%s' (dash) which is not allowed, only letters and numbers can be used.", dash))
		fail = true
	}
	message = append(message, fmt.Sprintf("Please change the name to fit this requirement(s). For more details please visit %s.", docuLink))
	if fail {
		// fix: errors.New instead of fmt.Errorf with a non-constant format
		// string - an app name containing '%' would otherwise be mangled
		// (and go vet flags the pattern).
		return errors.New(strings.Join(message, " "))
	}
	return nil
}
// prepareInflux records deployment metadata (result, user, endpoint, org,
// space, deploy time) into the influx data structure. A nil influxData is a
// no-op.
func prepareInflux(success bool, config *cloudFoundryDeployOptions, influxData *cloudFoundryDeployInflux) {
	if influxData == nil {
		return
	}
	result := "FAILURE"
	if success {
		result = "SUCCESS"
	}
	influxData.deployment_data.tags.artifactVersion = config.ArtifactVersion
	influxData.deployment_data.tags.deployUser = config.Username
	influxData.deployment_data.tags.deployResult = result
	influxData.deployment_data.tags.cfAPIEndpoint = config.APIEndpoint
	influxData.deployment_data.tags.cfOrg = config.Org
	influxData.deployment_data.tags.cfSpace = config.Space
	// n/a (literally) is also reported in groovy
	influxData.deployment_data.fields.artifactURL = "n/a"
	influxData.deployment_data.fields.commitHash = config.CommitHash
	// _now is an indirection over time.Now for testability
	influxData.deployment_data.fields.deployTime = strings.ToUpper(_now().Format("Jan 02 2006 15:04:05"))
	// we should discuss how we handle the job trigger
	// 1.) outside Jenkins
	// 2.) inside Jenkins (how to get)
	influxData.deployment_data.fields.jobTrigger = "n/a"
}
// handleMTADeployment determines the mtar file to deploy - either the
// configured path or a single mtar found in the workspace - and deploys it.
func handleMTADeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	mtarFilePath := config.MtaPath
	if mtarFilePath == "" {
		// no path configured: search the workspace
		found, err := findMtar()
		if err != nil {
			return err
		}
		mtarFilePath = found
		log.Entry().Debugf("Using mtar file '%s' found in workspace", mtarFilePath)
		return deployMta(config, mtarFilePath, command)
	}
	exists, err := fileUtils.FileExists(mtarFilePath)
	if err != nil {
		return errors.Wrapf(err, "Cannot check if file path '%s' exists", mtarFilePath)
	}
	if !exists {
		return fmt.Errorf("mtar file '%s' retrieved from configuration does not exist", mtarFilePath)
	}
	log.Entry().Debugf("Using mtar file '%s' from configuration", mtarFilePath)
	return deployMta(config, mtarFilePath, command)
}
// deployConfig bundles everything needed to assemble the final cf command line.
type deployConfig struct {
	DeployCommand   string   // cf sub-command, e.g. "push" or "blue-green-deploy"
	DeployOptions   []string // additional options for the sub-command
	AppName         string   // application name; may be empty (then taken from the manifest by cf)
	ManifestFile    string   // manifest file passed via -f; may be empty
	SmokeTestScript []string // e.g. ["--smoke-test", "<abs path>"]; used for blue-green only
}
// handleCFNativeDeployment prepares and triggers a cf-native deployment,
// either via "cf push" or via the blue-green-deploy plugin, depending on the
// (possibly downgraded) deploy type.
func handleCFNativeDeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	deployType, err := checkAndUpdateDeployTypeForNotSupportedManifest(config)
	if err != nil {
		return err
	}
	var deployCommand string
	var smokeTestScript []string
	var deployOptions []string
	// deploy command will be provided by the prepare functions below
	if deployType == "blue-green" {
		deployCommand, deployOptions, smokeTestScript, err = prepareBlueGreenCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf native deployment. DeployType '%s'", deployType)
		}
	} else if deployType == "standard" {
		deployCommand, deployOptions, smokeTestScript, err = prepareCfPushCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf push native deployment. DeployType '%s'", deployType)
		}
	} else {
		return fmt.Errorf("Invalid deploy type received: '%s'. Supported values: %v", deployType, []string{"blue-green", "standard"})
	}
	appName, err := getAppName(config)
	if err != nil {
		return err
	}
	manifestFile, err := getManifestFileName(config)
	if err != nil {
		// fix: this error was previously dropped without being checked
		return errors.Wrap(err, "Cannot obtain manifest file name")
	}
	log.Entry().Infof("CF native deployment ('%s') with:", config.DeployType)
	log.Entry().Infof("cfAppName='%s'", appName)
	log.Entry().Infof("cfManifest='%s'", manifestFile)
	log.Entry().Infof("cfManifestVariables: '%v'", config.ManifestVariables)
	log.Entry().Infof("cfManifestVariablesFiles: '%v'", config.ManifestVariablesFiles)
	log.Entry().Infof("cfdeployDockerImage: '%s'", config.DeployDockerImage)
	log.Entry().Infof("smokeTestScript: '%s'", config.SmokeTestScript)
	additionalEnvironment := []string{
		"STATUS_CODE=" + strconv.FormatInt(int64(config.SmokeTestStatusCode), 10),
	}
	if len(config.DockerPassword) > 0 {
		// the cf CLI reads the docker registry password from this env var
		additionalEnvironment = append(additionalEnvironment, "CF_DOCKER_PASSWORD="+config.DockerPassword)
	}
	// NOTE(review): AppName/ManifestFile deliberately keep the raw config
	// values (preserving existing behavior), while the log above prints the
	// derived appName/manifestFile - TODO confirm whether the derived values
	// should be used here instead.
	myDeployConfig := deployConfig{
		DeployCommand:   deployCommand,
		DeployOptions:   deployOptions,
		AppName:         config.AppName,
		ManifestFile:    config.Manifest,
		SmokeTestScript: smokeTestScript,
	}
	log.Entry().Infof("DeployConfig: %v", myDeployConfig)
	return deployCfNative(myDeployConfig, config, additionalEnvironment, command)
}
// deployCfNative assembles the final cf command line from the prepared deploy
// configuration and runs it via cfDeploy. For blue-green deployments with
// keepOldInstance, the old application instance is stopped afterwards.
func deployCfNative(deployConfig deployConfig, config *cloudFoundryDeployOptions, additionalEnvironment []string, cmd command.ExecRunner) error {
	deployStatement := []string{
		deployConfig.DeployCommand,
	}
	if len(deployConfig.AppName) > 0 {
		deployStatement = append(deployStatement, deployConfig.AppName)
	}
	if len(deployConfig.DeployOptions) > 0 {
		deployStatement = append(deployStatement, deployConfig.DeployOptions...)
	}
	if len(deployConfig.ManifestFile) > 0 {
		deployStatement = append(deployStatement, "-f")
		deployStatement = append(deployStatement, deployConfig.ManifestFile)
	}
	// docker flags only apply to "standard" deployments; the blue-green
	// plugin does not support them
	if len(config.DeployDockerImage) > 0 && config.DeployType != "blue-green" {
		deployStatement = append(deployStatement, "--docker-image", config.DeployDockerImage)
	}
	if len(config.DockerUsername) > 0 && config.DeployType != "blue-green" {
		deployStatement = append(deployStatement, "--docker-username", config.DockerUsername)
	}
	if len(deployConfig.SmokeTestScript) > 0 {
		deployStatement = append(deployStatement, deployConfig.SmokeTestScript...)
	}
	if len(config.CfNativeDeployParameters) > 0 {
		deployStatement = append(deployStatement, strings.Fields(config.CfNativeDeployParameters)...)
	}
	// post-deploy action: stop the "<app>-old" instance left behind by a
	// blue-green deployment when keepOldInstance is configured. A missing old
	// app ("<name> not found" in the cf output) is tolerated.
	stopOldAppIfRunning := func(_cmd command.ExecRunner) error {
		if config.KeepOldInstance && config.DeployType == "blue-green" {
			oldAppName := deployConfig.AppName + "-old"
			var buff bytes.Buffer
			// temporarily capture stdout to inspect the cf stop output
			_cmd.Stdout(&buff)
			defer func() {
				_cmd.Stdout(log.Writer())
			}()
			err := _cmd.RunExecutable("cf", "stop", oldAppName)
			if err != nil {
				cfStopLog := buff.String()
				if !strings.Contains(cfStopLog, oldAppName+" not found") {
					return fmt.Errorf("Could not stop application '%s'. Error: %s", oldAppName, cfStopLog)
				}
				log.Entry().Infof("Cannot stop application '%s' since this appliation was not found.", oldAppName)
			} else {
				log.Entry().Infof("Old application '%s' has been stopped.", oldAppName)
			}
		}
		return nil
	}
	return cfDeploy(config, deployStatement, additionalEnvironment, stopOldAppIfRunning, cmd)
}
// getManifest reads and parses the given manifest file; wrapped by the
// _getManifest indirection for testability.
func getManifest(name string) (cloudfoundry.Manifest, error) {
	return cloudfoundry.ReadManifest(name)
}
func getManifestFileName(config *cloudFoundryDeployOptions) (string, error) |
// getAppName returns the application name to deploy: the configured AppName if
// set, otherwise the name of the first application declared in the manifest.
// Blue-green deployments require an explicitly configured name.
func getAppName(config *cloudFoundryDeployOptions) (string, error) {
	if len(config.AppName) > 0 {
		return config.AppName, nil
	}
	if config.DeployType == "blue-green" {
		return "", fmt.Errorf("Blue-green plugin requires app name to be passed (see https://github.com/bluemixgaragelondon/cf-blue-green-deploy/issues/27)")
	}
	manifestFile, err := getManifestFileName(config)
	if err != nil {
		// fix: this error was previously overwritten by the next call
		// without ever being checked
		return "", errors.Wrap(err, "Cannot obtain manifest file name")
	}
	fileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}
	if !fileExists {
		return "", fmt.Errorf("Manifest file '%s' not found. Cannot retrieve app name", manifestFile)
	}
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return "", err
	}
	apps, err := manifest.GetApplications()
	if err != nil {
		return "", err
	}
	if len(apps) == 0 {
		return "", fmt.Errorf("No apps declared in manifest '%s'", manifestFile)
	}
	namePropertyExists, err := manifest.ApplicationHasProperty(0, "name")
	if err != nil {
		return "", err
	}
	if !namePropertyExists {
		return "", fmt.Errorf("No appName available in manifest '%s'", manifestFile)
	}
	appName, err := manifest.GetApplicationProperty(0, "name")
	if err != nil {
		return "", err
	}
	// the property value is untyped; it must be a non-empty string
	name, ok := appName.(string)
	if !ok {
		return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
	}
	if len(name) == 0 {
		return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
	}
	return name, nil
}
// handleSmokeTestScript prepares the smoke-test script for the blue-green
// plugin: when the default script name is configured, the built-in default
// script is written to disk; the script is then made executable and returned
// as a "--smoke-test <absolute path>" parameter pair. An empty script name
// yields no parameters.
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
	if len(smokeTestScript) == 0 {
		return []string{}, nil
	}
	if smokeTestScript == "blueGreenCheckScript.sh" {
		// what should we do if there is already a script with the given name? Should we really overwrite ...
		if err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755); err != nil {
			return []string{}, fmt.Errorf("failed to write default smoke-test script: %w", err)
		}
		log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
	}
	if err := fileUtils.Chmod(smokeTestScript, 0755); err != nil {
		return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
	}
	pwd, err := fileUtils.Getwd()
	if err != nil {
		return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
	}
	return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
// prepareBlueGreenCfNativeDeploy prepares a deployment via the
// blue-green-deploy cf plugin: it materializes the smoke-test parameters,
// performs variable substitution in the manifest, and transforms legacy
// manifests. It returns the cf sub-command, its options, and the smoke-test
// parameters.
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
	if err != nil {
		return "", []string{}, []string{}, err
	}
	var deployOptions = []string{}
	if !config.KeepOldInstance {
		deployOptions = append(deployOptions, "--delete-old-apps")
	}
	manifestFile, err := getManifestFileName(config)
	if err != nil {
		// fix: this error was previously overwritten by the next call
		// without ever being checked
		return "", []string{}, []string{}, errors.Wrap(err, "Cannot obtain manifest file name")
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}
	if !manifestFileExists {
		log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
	} else {
		manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
		}
		manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
		}
		// substitute the variables in place; the plugin has no --var support
		modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
		}
		if modified {
			log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
		} else {
			log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
		}
		err = handleLegacyCfManifest(manifestFile)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
		}
	}
	return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles: in case the only provided file is 'manifest-variables.yml' and this file does not
// exist we ignore that file. For any other file there is no check if that file exists. In case several files are
// provided we also do not check for the default file 'manifest-variables.yml'
// validateManifestVariablesFiles filters the configured vars files: when the
// only entry is the default 'manifest-variables.yml' and that file does not
// exist, it is dropped (it is most likely just the default, not an explicit
// configuration). In every other case the list is returned unchanged and no
// existence check is performed.
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
	const defaultManifestVariableFileName = "manifest-variables.yml"
	if len(manifestVariablesFiles) != 1 || manifestVariablesFiles[0] != defaultManifestVariableFileName {
		return manifestVariablesFiles, nil
	}
	// only the default file is listed - check whether it actually exists
	exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
	if err != nil {
		return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
	}
	if !exists {
		return []string{}, nil
	}
	return manifestVariablesFiles, nil
}
// toParameterMap converts "<key>=<value>" entries into an ordered map,
// preserving the configured order. Entries without '=' are rejected.
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
	parameterMap := orderedmap.NewOrderedMap()
	for _, p := range parameters {
		// SplitN keeps '=' characters inside the value intact
		// (e.g. "token=a=b" -> key "token", value "a=b"); plain Split
		// rejected such entries.
		keyVal := strings.SplitN(p, "=", 2)
		if len(keyVal) != 2 {
			return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", p)
		}
		parameterMap.Set(keyVal[0], keyVal[1])
	}
	return parameterMap, nil
}
// handleLegacyCfManifest transforms a manifest in legacy format into the
// current format and writes it back to disk when a transformation actually
// changed it.
func handleLegacyCfManifest(manifestFile string) error {
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return err
	}
	err = manifest.Transform()
	if err != nil {
		return err
	}
	// only persist when Transform changed the manifest
	if manifest.IsModified() {
		err = manifest.WriteManifest()
		if err != nil {
			return err
		}
		log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
	} else {
		log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
	}
	return nil
}
// prepareCfPushCfNativeDeploy prepares a "cf push" deployment: it translates
// the configured manifest variables and variables files into --var/--vars-file
// options. Missing vars files are skipped with a warning; push itself has no
// smoke-test support, so the third return value is always empty.
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	deployOptions := []string{}
	varOptions, err := _getVarsOptions(config.ManifestVariables)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
	}
	varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
	if err != nil {
		// non-existing vars files are tolerated (warning only); other errors abort
		// NOTE(review): the warning text mentions "cf create-service-push" -
		// presumably copied from a sibling step; verify the wording upstream.
		if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
			for _, missingVarFile := range e.MissingFiles {
				log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
			}
		} else {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
		}
	}
	deployOptions = append(deployOptions, varOptions...)
	deployOptions = append(deployOptions, varFileOptions...)
	return "push", deployOptions, []string{}, nil
}
// toStringInterfaceMap converts an ordered map into a plain map with string
// keys. The incoming err is passed through unchanged so the function can be
// chained directly after toParameterMap(...); on a non-nil err the map stays
// empty.
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
	out := map[string]interface{}{}
	if err == nil {
		for _, key := range in.Keys() {
			if k, ok := key.(string); ok {
				val, exists := in.Get(key)
				if exists {
					out[k] = val
				} else {
					return nil, fmt.Errorf("No entry found for '%v'", key)
				}
			} else {
				return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
			}
		}
	}
	return out, err
}
// checkAndUpdateDeployTypeForNotSupportedManifest downgrades a blue-green
// deployment to "standard" when the manifest declares a single application
// with the 'no-route' property (blue-green needs a route). Manifests with more
// than one application are rejected for blue-green.
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
	manifestFile, err := getManifestFileName(config)
	if err != nil {
		// fix: this error was previously overwritten by the next call
		// without ever being checked
		return "", errors.Wrap(err, "Cannot obtain manifest file name")
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", err
	}
	if config.DeployType == "blue-green" && manifestFileExists {
		manifest, err := _getManifest(manifestFile)
		if err != nil {
			// fix: the error was previously discarded, so a broken manifest
			// was processed as an empty one
			return "", errors.Wrapf(err, "failed to read manifest '%s'", manifestFile)
		}
		apps, err := manifest.GetApplications()
		if err != nil {
			return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
		}
		if len(apps) > 1 {
			return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
		}
		hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
		if err != nil {
			return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
		}
		if len(apps) == 1 && hasNoRouteProperty {
			const deployTypeStandard = "standard"
			log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
			return deployTypeStandard, nil
		}
	}
	return config.DeployType, nil
}
// deployMta deploys the given mtar file via the cf CLI "deploy"/"bg-deploy"
// plugin command. Extension descriptor files are backed up, credential
// placeholders inside them are resolved, and the originals are restored after
// the deployment.
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
	deployCommand := "deploy"
	deployParams := []string{}
	if len(config.MtaDeployParameters) > 0 {
		deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...)
	}
	if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
		deployCommand = "bg-deploy"
		// bg-deploy would otherwise wait for interactive confirmation
		const noConfirmFlag = "--no-confirm"
		if !piperutils.ContainsString(deployParams, noConfirmFlag) {
			deployParams = append(deployParams, noConfirmFlag)
		}
	}
	cfDeployParams := []string{
		deployCommand,
		mtarFilePath,
	}
	if len(deployParams) > 0 {
		cfDeployParams = append(cfDeployParams, deployParams...)
	}
	extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)
	for _, extFile := range extFiles {
		// keep a pristine copy so the credential substitution can be undone below
		_, err := fileUtils.Copy(extFile, extFile+".original")
		if err != nil {
			return fmt.Errorf("Cannot prepare mta extension files: %w", err)
		}
		_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
		if err != nil {
			return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
		}
	}
	cfDeployParams = append(cfDeployParams, extFileParams...)
	err := cfDeploy(config, cfDeployParams, nil, nil, command)
	// restore the original extension files; a restore failure is only
	// surfaced when the deployment itself succeeded
	for _, extFile := range extFiles {
		renameError := fileUtils.FileRename(extFile+".original", extFile)
		if err == nil && renameError != nil {
			return renameError
		}
	}
	return err
}
// handleMtaExtensionCredentials replaces `<%= name %>` placeholders in the
// given mta extension file with credential values taken from environment
// variables; the env var name is derived from the configured credential key
// via toEnvVarKey. It reports whether the file was updated, whether
// unresolved placeholders remain, and fails on invalid key names or missing
// credentials.
func handleMtaExtensionCredentials(extFile string, credentials map[string]interface{}) (updated, containsUnresolved bool, err error) {
	log.Entry().Debugf("Inserting credentials into extension file '%s'", extFile)
	b, err := fileUtils.FileRead(extFile)
	if err != nil {
		return false, false, errors.Wrapf(err, "Cannot handle credentials for mta extension file '%s'", extFile)
	}
	content := string(b)
	env, err := toMap(_environ(), "=")
	if err != nil {
		return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
	}
	missingCredentials := []string{}
	for name, credentialKey := range credentials {
		credKey, ok := credentialKey.(string)
		if !ok {
			return false, false, fmt.Errorf("cannot handle mta extension credentials: Cannot cast '%v' (type %T) to string", credentialKey, credentialKey)
		}
		// restrict placeholder names so they can be embedded into a regex safely
		const allowedVariableNamePattern = "^[-_A-Za-z0-9]+$"
		alphaNumOnly := regexp.MustCompile(allowedVariableNamePattern)
		if !alphaNumOnly.MatchString(name) {
			return false, false, fmt.Errorf("credential key name '%s' contains unsupported character. Must contain only %s", name, allowedVariableNamePattern)
		}
		pattern := regexp.MustCompile("<%=\\s*" + name + "\\s*%>")
		if pattern.MatchString(content) {
			cred := env[toEnvVarKey(credKey)]
			if len(cred) == 0 {
				missingCredentials = append(missingCredentials, credKey)
				continue
			}
			content = pattern.ReplaceAllLiteralString(content, cred)
			updated = true
			log.Entry().Debugf("Mta extension credentials handling: Placeholder '%s' has been replaced by credential denoted by '%s'/'%s' in file '%s'", name, credKey, toEnvVarKey(credKey), extFile)
		} else {
			log.Entry().Debugf("Mta extension credentials handling: Variable '%s' is not used in file '%s'", name, extFile)
		}
	}
	if len(missingCredentials) > 0 {
		missinCredsEnvVarKeyCompatible := []string{}
		for _, missingKey := range missingCredentials {
			missinCredsEnvVarKeyCompatible = append(missinCredsEnvVarKeyCompatible, toEnvVarKey(missingKey))
		}
		// ensure stable order of the entries. Needed e.g. for the tests.
		sort.Strings(missingCredentials)
		sort.Strings(missinCredsEnvVarKeyCompatible)
		return false, false, fmt.Errorf("cannot handle mta extension credentials: No credentials found for '%s'/'%s'. Are these credentials maintained?", missingCredentials, missinCredsEnvVarKeyCompatible)
	}
	if !updated {
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has not been updated. Seems to contain no credentials.", extFile)
	} else {
		// fix: check the Stat error before using fInfo - previously
		// fInfo.Mode() was evaluated first, which dereferences a nil
		// FileInfo when Stat failed
		fInfo, err := fileUtils.Stat(extFile)
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		err = fileUtils.FileWrite(extFile, []byte(content), fInfo.Mode())
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has been updated.", extFile)
	}
	re := regexp.MustCompile(`<%=.+%>`)
	placeholders := re.FindAll([]byte(content), -1)
	containsUnresolved = (len(placeholders) > 0)
	if containsUnresolved {
		log.Entry().Warningf("mta extension credential handling: Unresolved placeholders found after inserting credentials: %s", placeholders)
	}
	return updated, containsUnresolved, nil
}
// toEnvVarKey maps a credential key to the corresponding environment variable
// name: non-alphanumeric characters become '_', an '_' is inserted at
// camelCase boundaries, and the result is upper-cased
// (e.g. "myCred.Key" -> "MY_CRED_KEY").
func toEnvVarKey(key string) string {
	nonAlphaNum := regexp.MustCompile(`[^A-Za-z0-9]`)
	camelBoundary := regexp.MustCompile(`([a-z0-9])([A-Z])`)
	sanitized := nonAlphaNum.ReplaceAllString(key, "_")
	snake := camelBoundary.ReplaceAllString(sanitized, "${1}_${2}")
	return strings.ToUpper(snake)
}
// toMap converts "key<separator>value" entries into a map. Only the first
// separator splits the entry; additional separators stay part of the value.
// An entry without the separator aborts with an error; duplicate keys keep
// the last value.
func toMap(keyValue []string, separator string) (map[string]string, error) {
	result := map[string]string{}
	for _, entry := range keyValue {
		parts := strings.SplitN(entry, separator, 2)
		if len(parts) < 2 {
			return map[string]string{}, fmt.Errorf("Cannot convert to map: separator '%s' not found in entry '%s'", separator, entry)
		}
		result[parts[0]] = parts[1]
	}
	return result, nil
}
// handleMtaExtensionDescriptors parses a raw "-e a.mtaext b.mtaext" style
// configuration string. It returns the cf CLI parameter slice
// (["-e", "a.mtaext,b.mtaext"]) and the bare list of extension files; both are
// empty when no files are configured.
func handleMtaExtensionDescriptors(mtaExtensionDescriptor string) ([]string, []string) {
	extFiles := []string{}
	for _, token := range strings.Fields(strings.Trim(mtaExtensionDescriptor, " ")) {
		// drop flag markers and empty tokens; everything else is a file name
		// REVISIT: maybe check if the extension descriptor exists
		if token != "-e" && token != "" {
			extFiles = append(extFiles, token)
		}
	}
	params := []string{}
	if len(extFiles) > 0 {
		params = append(params, "-e", strings.Join(extFiles, ","))
	}
	return params, extFiles
}
// cfDeploy logs in to Cloud Foundry, lists the installed plugins, runs the
// given deploy command, executes an optional post-deploy action and finally
// logs out. CF_TRACE output is routed to a file which is dumped on failure
// (or always in verbose mode).
func cfDeploy(
	config *cloudFoundryDeployOptions,
	cfDeployParams []string,
	additionalEnvironment []string,
	postDeployAction func(command command.ExecRunner) error,
	command command.ExecRunner) error {

	const cfLogFile = "cf.log"
	var err error
	var loginPerformed bool

	additionalEnvironment = append(additionalEnvironment, "CF_TRACE="+cfLogFile)
	if len(config.CfHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_HOME="+config.CfHome)
	}
	if len(config.CfPluginHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_PLUGIN_HOME="+config.CfPluginHome)
	}
	log.Entry().Infof("Using additional environment variables: %s", additionalEnvironment)

	// TODO set HOME to config.DockerWorkspace
	command.SetEnv(additionalEnvironment)

	// "cf version" serves as a sanity check that the CLI is available
	err = command.RunExecutable("cf", "version")
	if err == nil {
		err = _cfLogin(command, cloudfoundry.LoginOptions{
			CfAPIEndpoint: config.APIEndpoint,
			CfOrg:         config.Org,
			CfSpace:       config.Space,
			Username:      config.Username,
			Password:      config.Password,
			CfLoginOpts:   strings.Fields(config.LoginParameters),
		})
	}
	if err == nil {
		loginPerformed = true
		// plugin listing is informational; its failure is logged but not fatal
		err = command.RunExecutable("cf", []string{"plugins"}...)
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", []string{"plugins"})
		}
	}
	if err == nil {
		err = command.RunExecutable("cf", cfDeployParams...)
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", cfDeployParams)
		}
	}
	if err == nil && postDeployAction != nil {
		err = postDeployAction(command)
	}
	// always log out when login succeeded; a logout failure is surfaced only
	// when no earlier error exists
	if loginPerformed {
		logoutErr := _cfLogout(command)
		if logoutErr != nil {
			log.Entry().WithError(logoutErr).Errorf("Cannot perform cf logout")
			if err == nil {
				err = logoutErr
			}
		}
	}
	if err != nil || GeneralConfig.Verbose {
		e := handleCfCliLog(cfLogFile)
		if e != nil {
			// fix: attach the log-read error 'e' to this message; previously
			// the unrelated deploy error 'err' was logged here
			log.Entry().WithError(e).Errorf("Error reading cf log file '%s'.", cfLogFile)
		}
	}
	return err
}
// findMtar searches the workspace for exactly one mtar file and returns its
// path. Zero matches or multiple matches are errors (multiple matches require
// the 'mtarPath' parameter to disambiguate).
func findMtar() (string, error) {
	const pattern = "**/*.mtar"
	mtars, err := fileUtils.Glob(pattern)
	if err != nil {
		return "", err
	}
	if len(mtars) == 0 {
		return "", fmt.Errorf("No mtar file matching pattern '%s' found", pattern)
	}
	if len(mtars) > 1 {
		// fix: join the glob result directly; the previous element-wise copy
		// into a fresh slice had no effect
		return "", fmt.Errorf("Found multiple mtar files matching pattern '%s' (%s), please specify file via parameter 'mtarPath'", pattern, strings.Join(mtars, ","))
	}
	return mtars[0], nil
}
// handleCfCliLog dumps the CF_TRACE log file line by line into the step log,
// framed by start/end markers. A missing file only produces a warning.
// Note: the errors inside the if-block shadow the outer err, so on the happy
// path the function returns the (nil) result of the initial FileExists check.
func handleCfCliLog(logFile string) error {
	fExists, err := fileUtils.FileExists(logFile)
	if err != nil {
		return err
	}
	log.Entry().Info("### START OF CF CLI TRACE OUTPUT ###")
	if fExists {
		f, err := os.Open(logFile)
		if err != nil {
			return err
		}
		defer f.Close()
		bReader := bufio.NewReader(f)
		for {
			line, err := bReader.ReadString('\n')
			if err == nil || err == io.EOF {
				// maybe inappropriate to log as info. Maybe the line from the
				// log indicates an error, but that is something like a project
				// standard.
				log.Entry().Info(strings.TrimSuffix(line, "\n"))
			}
			if err != nil {
				break
			}
		}
	} else {
		log.Entry().Warningf("No trace file found at '%s'", logFile)
	}
	log.Entry().Info("### END OF CF CLI TRACE OUTPUT ###")
	return err
}
| {
manifestFileName := config.Manifest
if len(manifestFileName) == 0 {
manifestFileName = "manifest.yml"
}
return manifestFileName, nil
} | identifier_body |
cloudFoundryDeploy.go | package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/SAP/jenkins-library/pkg/cloudfoundry"
"github.com/SAP/jenkins-library/pkg/command"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/yaml"
"github.com/elliotchance/orderedmap"
"github.com/pkg/errors"
)
// cfFileUtil is the file-system abstraction of this step; unit tests provide a
// mock implementation via the fileUtils package variable.
type cfFileUtil interface {
	FileExists(string) (bool, error)
	FileRename(string, string) error
	FileRead(string) ([]byte, error)
	FileWrite(path string, content []byte, perm os.FileMode) error
	Getwd() (string, error)
	Glob(string) ([]string, error)
	Chmod(string, os.FileMode) error
	Copy(string, string) (int64, error)
	Stat(path string) (os.FileInfo, error)
}
// Function indirections; swapped for mocks in unit tests.
var _now = time.Now
var _cfLogin = cfLogin
var _cfLogout = cfLogout
var _getManifest = getManifest
var _replaceVariables = yaml.Substitute
var _getVarsOptions = cloudfoundry.GetVarsOptions
var _getVarsFileOptions = cloudfoundry.GetVarsFileOptions
var _environ = os.Environ

// fileUtils provides all file-system access for this step; replaced in tests.
var fileUtils cfFileUtil = piperutils.Files{}
// cfLogin performs a cf CLI login through CFUtils. It exists as a free
// function (rather than a direct CFUtils call) to simplify mocking via the
// _cfLogin package variable.
func cfLogin(c command.ExecRunner, options cloudfoundry.LoginOptions) error {
	utils := cloudfoundry.CFUtils{Exec: c}
	return utils.Login(options)
}
// cfLogout performs a cf CLI logout through CFUtils. It exists as a free
// function (rather than a direct CFUtils call) to simplify mocking via the
// _cfLogout package variable.
func cfLogout(c command.ExecRunner) error {
	utils := cloudfoundry.CFUtils{Exec: c}
	return utils.Logout()
}
// defaultSmokeTestScript is written to disk as "blueGreenCheckScript.sh" when
// that default script name is configured (see handleSmokeTestScript); it
// checks that the application root answers with the HTTP status code given in
// the STATUS_CODE environment variable.
const defaultSmokeTestScript = `#!/usr/bin/env bash
# this is simply testing if the application root returns HTTP STATUS_CODE
curl -so /dev/null -w '%{response_code}' https://$1 | grep $STATUS_CODE`
// cloudFoundryDeploy is the step entry point: it wires up a command runner
// whose output is routed through the piper logging framework and delegates to
// runCloudFoundryDeploy. Any returned error is fatal and terminates the
// process with exit code 1 (via log.Fatalf).
func cloudFoundryDeploy(config cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux) {
	runner := command.Command{}
	// Reroute command output to the logging framework instead of raw stdio.
	runner.Stdout(log.Writer())
	runner.Stderr(log.Writer())
	// for http calls import piperhttp "github.com/SAP/jenkins-library/pkg/http"
	// and use a &piperhttp.Client{} in a custom system
	// Example: step checkmarxExecuteScan.go
	if err := runCloudFoundryDeploy(&config, telemetryData, influxData, &runner); err != nil {
		// Fatal ends with os.Exit(1), the expected failure mode for steps.
		log.Entry().WithError(err).Fatalf("step execution failed: %s", err)
	}
}
// runCloudFoundryDeploy dispatches to the MTA or cf-native deployment flow
// based on config.DeployTool and, whenever a deployment was actually
// attempted, records the outcome in the influx data.
func runCloudFoundryDeploy(config *cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux, command command.ExecRunner) error {
	log.Entry().Infof("General parameters: deployTool='%s', deployType='%s', cfApiEndpoint='%s', cfOrg='%s', cfSpace='%s'",
		config.DeployTool, config.DeployType, config.APIEndpoint, config.Org, config.Space)

	if err := validateAppName(config.AppName); err != nil {
		return err
	}
	validateDeployTool(config)

	deployTriggered := false
	var err error
	switch config.DeployTool {
	case "mtaDeployPlugin":
		deployTriggered = true
		err = handleMTADeployment(config, command)
	case "cf_native":
		deployTriggered = true
		err = handleCFNativeDeployment(config, command)
	default:
		// Unknown tool: warn and skip, but do not fail the step.
		log.Entry().Warningf("Found unsupported deployTool ('%s'). Skipping deployment. Supported deploy tools: 'mtaDeployPlugin', 'cf_native'", config.DeployTool)
	}

	if deployTriggered {
		// Report SUCCESS/FAILURE only for deployments that actually ran.
		prepareInflux(err == nil, config, influxData)
	}
	return err
}
// validateDeployTool derives config.DeployTool from config.BuildTool when the
// former is unset: buildTool "mta" maps to the mta deploy plugin, everything
// else to cf_native. An explicitly configured deployTool is never overridden.
func validateDeployTool(config *cloudFoundryDeployOptions) {
	// Nothing to derive if deployTool is already set or buildTool is unknown.
	if config.DeployTool != "" || config.BuildTool == "" {
		return
	}
	if config.BuildTool == "mta" {
		config.DeployTool = "mtaDeployPlugin"
	} else {
		config.DeployTool = "cf_native"
	}
	log.Entry().Infof("Parameter deployTool not specified - deriving from buildTool '%s': '%s'",
		config.BuildTool, config.DeployTool)
}
// validateAppName checks the configured application name against the
// CloudFoundry naming rules (letters, digits, dashes; no leading/trailing
// dash; the empty string is accepted). Underscores or edge dashes yield an
// error; other non-alphanumeric characters only produce a warning.
func validateAppName(appName string) error {
	// for the sake of brevity we consider the empty string as valid app name here
	isValidAppName, err := regexp.MatchString("^$|^[a-zA-Z0-9]$|^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$", appName)
	if err != nil {
		return err
	}
	if isValidAppName {
		return nil
	}
	const (
		underscore = "_"
		dash       = "-"
		docuLink   = "https://docs.cloudfoundry.org/devguide/deploy-apps/deploy-app.html#basic-settings"
	)
	log.Entry().Warningf("Your application name '%s' contains non-alphanumeric characters which may lead to errors in the future, "+
		"as they are not supported by CloudFoundry. For more details please visit %s", appName, docuLink)
	var fail bool
	message := []string{fmt.Sprintf("Your application name '%s'", appName)}
	if strings.Contains(appName, underscore) {
		message = append(message, fmt.Sprintf("contains a '%s' (underscore) which is not allowed, only letters, dashes and numbers can be used.", underscore))
		fail = true
	}
	if strings.HasPrefix(appName, dash) || strings.HasSuffix(appName, dash) {
		message = append(message, fmt.Sprintf("starts or ends with a '%s' (dash) which is not allowed, only letters and numbers can be used.", dash))
		fail = true
	}
	message = append(message, fmt.Sprintf("Please change the name to fit this requirement(s). For more details please visit %s.", docuLink))
	if fail {
		// Fix: the message contains the user-controlled app name, so it must
		// not be used as a format string (fmt.Errorf(s) would misinterpret
		// any '%' in the name). errors.New takes it verbatim.
		return errors.New(strings.Join(message, " "))
	}
	return nil
}
// prepareInflux fills the influx measurement "deployment_data" with the
// outcome of the deployment. A nil influxData (e.g. influx reporting
// disabled) is a no-op.
func prepareInflux(success bool, config *cloudFoundryDeployOptions, influxData *cloudFoundryDeployInflux) {
if influxData == nil {
return
}
result := "FAILURE"
if success {
result = "SUCCESS"
}
influxData.deployment_data.tags.artifactVersion = config.ArtifactVersion
influxData.deployment_data.tags.deployUser = config.Username
influxData.deployment_data.tags.deployResult = result
influxData.deployment_data.tags.cfAPIEndpoint = config.APIEndpoint
influxData.deployment_data.tags.cfOrg = config.Org
influxData.deployment_data.tags.cfSpace = config.Space
// n/a (literally) is also reported in groovy
influxData.deployment_data.fields.artifactURL = "n/a"
influxData.deployment_data.fields.commitHash = config.CommitHash
// deployTime format matches the groovy implementation, upper-cased.
influxData.deployment_data.fields.deployTime = strings.ToUpper(_now().Format("Jan 02 2006 15:04:05"))
// we should discuss how we handle the job trigger
// 1.) outside Jenkins
// 2.) inside Jenkins (how to get)
influxData.deployment_data.fields.jobTrigger = "n/a"
}
// handleMTADeployment resolves the mtar file to deploy — either the
// configured mtaPath or, if unset, a single mtar discovered in the workspace —
// and hands it to deployMta.
func handleMTADeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	mtarFilePath := config.MtaPath

	if mtarFilePath == "" {
		// No path configured: search the workspace (exactly one match allowed).
		found, err := findMtar()
		if err != nil {
			return err
		}
		mtarFilePath = found
		log.Entry().Debugf("Using mtar file '%s' found in workspace", mtarFilePath)
		return deployMta(config, mtarFilePath, command)
	}

	exists, err := fileUtils.FileExists(mtarFilePath)
	if err != nil {
		return errors.Wrapf(err, "Cannot check if file path '%s' exists", mtarFilePath)
	}
	if !exists {
		return fmt.Errorf("mtar file '%s' retrieved from configuration does not exist", mtarFilePath)
	}
	log.Entry().Debugf("Using mtar file '%s' from configuration", mtarFilePath)
	return deployMta(config, mtarFilePath, command)
}
// deployConfig bundles the pieces of a cf-native deploy command as assembled
// by handleCFNativeDeployment and consumed by deployCfNative.
type deployConfig struct {
DeployCommand string
DeployOptions []string
AppName string
ManifestFile string
SmokeTestScript []string
}
// handleCFNativeDeployment prepares and executes a cf-native deployment,
// either via "cf push" (standard) or the blue-green-deploy plugin.
func handleCFNativeDeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	deployType, err := checkAndUpdateDeployTypeForNotSupportedManifest(config)
	if err != nil {
		return err
	}

	var deployCommand string
	var smokeTestScript []string
	var deployOptions []string

	// deploy command will be provided by the prepare functions below
	if deployType == "blue-green" {
		deployCommand, deployOptions, smokeTestScript, err = prepareBlueGreenCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf native deployment. DeployType '%s'", deployType)
		}
	} else if deployType == "standard" {
		deployCommand, deployOptions, smokeTestScript, err = prepareCfPushCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf push native deployment. DeployType '%s'", deployType)
		}
	} else {
		return fmt.Errorf("Invalid deploy type received: '%s'. Supported values: %v", deployType, []string{"blue-green", "standard"})
	}

	appName, err := getAppName(config)
	if err != nil {
		return err
	}

	manifestFile, err := getManifestFileName(config)
	// Fix: this error was previously assigned but never checked.
	if err != nil {
		return err
	}

	log.Entry().Infof("CF native deployment ('%s') with:", config.DeployType)
	log.Entry().Infof("cfAppName='%s'", appName)
	log.Entry().Infof("cfManifest='%s'", manifestFile)
	log.Entry().Infof("cfManifestVariables: '%v'", config.ManifestVariables)
	log.Entry().Infof("cfManifestVariablesFiles: '%v'", config.ManifestVariablesFiles)
	log.Entry().Infof("cfdeployDockerImage: '%s'", config.DeployDockerImage)
	log.Entry().Infof("smokeTestScript: '%s'", config.SmokeTestScript)

	additionalEnvironment := []string{
		"STATUS_CODE=" + strconv.FormatInt(int64(config.SmokeTestStatusCode), 10),
	}
	if len(config.DockerPassword) > 0 {
		// The cf CLI picks up the docker registry password from this variable.
		additionalEnvironment = append(additionalEnvironment, "CF_DOCKER_PASSWORD="+config.DockerPassword)
	}

	// NOTE(review): AppName/ManifestFile below deliberately(?) use the raw
	// config values rather than the resolved appName/manifestFile logged
	// above (e.g. the app name taken from the manifest) — confirm this is
	// intended before changing it.
	myDeployConfig := deployConfig{
		DeployCommand:   deployCommand,
		DeployOptions:   deployOptions,
		AppName:         config.AppName,
		ManifestFile:    config.Manifest,
		SmokeTestScript: smokeTestScript,
	}
	log.Entry().Infof("DeployConfig: %v", myDeployConfig)
	return deployCfNative(myDeployConfig, config, additionalEnvironment, command)
}
// deployCfNative assembles the final cf command line from the prepared deploy
// configuration and runs it via cfDeploy. For blue-green deployments with
// keepOldInstance enabled, the old application instance is stopped after a
// successful deployment.
func deployCfNative(deployConfig deployConfig, config *cloudFoundryDeployOptions, additionalEnvironment []string, cmd command.ExecRunner) error {
// Order matters: command, app name, plugin options, manifest, docker
// settings, smoke test, then free-form extra parameters.
deployStatement := []string{
deployConfig.DeployCommand,
}
if len(deployConfig.AppName) > 0 {
deployStatement = append(deployStatement, deployConfig.AppName)
}
if len(deployConfig.DeployOptions) > 0 {
deployStatement = append(deployStatement, deployConfig.DeployOptions...)
}
if len(deployConfig.ManifestFile) > 0 {
deployStatement = append(deployStatement, "-f")
deployStatement = append(deployStatement, deployConfig.ManifestFile)
}
// Docker flags are only supported for the standard push, not blue-green.
if len(config.DeployDockerImage) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-image", config.DeployDockerImage)
}
if len(config.DockerUsername) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-username", config.DockerUsername)
}
if len(deployConfig.SmokeTestScript) > 0 {
deployStatement = append(deployStatement, deployConfig.SmokeTestScript...)
}
if len(config.CfNativeDeployParameters) > 0 {
deployStatement = append(deployStatement, strings.Fields(config.CfNativeDeployParameters)...)
}
// Post-deploy hook: stop the "<app>-old" instance left behind by
// blue-green-deploy when keepOldInstance is set. Stdout is captured into a
// buffer so the "not found" case can be distinguished from real failures;
// the deferred call restores the log writer before returning.
stopOldAppIfRunning := func(_cmd command.ExecRunner) error {
if config.KeepOldInstance && config.DeployType == "blue-green" {
oldAppName := deployConfig.AppName + "-old"
var buff bytes.Buffer
_cmd.Stdout(&buff)
defer func() {
_cmd.Stdout(log.Writer())
}()
err := _cmd.RunExecutable("cf", "stop", oldAppName)
if err != nil {
cfStopLog := buff.String()
// "app not found" is benign: there was simply no old instance.
if !strings.Contains(cfStopLog, oldAppName+" not found") {
return fmt.Errorf("Could not stop application '%s'. Error: %s", oldAppName, cfStopLog)
}
log.Entry().Infof("Cannot stop application '%s' since this appliation was not found.", oldAppName)
} else {
log.Entry().Infof("Old application '%s' has been stopped.", oldAppName)
}
}
return nil
}
return cfDeploy(config, deployStatement, additionalEnvironment, stopOldAppIfRunning, cmd)
}
// getManifest reads and parses the given cf manifest file; indirected through
// the _getManifest variable so tests can substitute a mock manifest.
func getManifest(name string) (cloudfoundry.Manifest, error) {
return cloudfoundry.ReadManifest(name)
}
// getManifestFileName returns the configured manifest file name, falling back
// to the conventional default "manifest.yml" when none is set. The error
// return is currently always nil but kept for interface stability.
func getManifestFileName(config *cloudFoundryDeployOptions) (string, error) {
	if config.Manifest == "" {
		return "manifest.yml", nil
	}
	return config.Manifest, nil
}
// getAppName determines the application name: the configured cfAppName wins;
// otherwise — except for blue-green deployments, where the plugin requires an
// explicit name — the name of the first application in the manifest is used.
func getAppName(config *cloudFoundryDeployOptions) (string, error) {
	if len(config.AppName) > 0 {
		return config.AppName, nil
	}
	if config.DeployType == "blue-green" {
		return "", fmt.Errorf("Blue-green plugin requires app name to be passed (see https://github.com/bluemixgaragelondon/cf-blue-green-deploy/issues/27)")
	}
	manifestFile, err := getManifestFileName(config)
	// Fix: this error was previously overwritten unchecked by the next call.
	if err != nil {
		return "", err
	}
	fileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}
	if !fileExists {
		return "", fmt.Errorf("Manifest file '%s' not found. Cannot retrieve app name", manifestFile)
	}
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return "", err
	}
	apps, err := manifest.GetApplications()
	if err != nil {
		return "", err
	}
	if len(apps) == 0 {
		return "", fmt.Errorf("No apps declared in manifest '%s'", manifestFile)
	}
	namePropertyExists, err := manifest.ApplicationHasProperty(0, "name")
	if err != nil {
		return "", err
	}
	if !namePropertyExists {
		return "", fmt.Errorf("No appName available in manifest '%s'", manifestFile)
	}
	appName, err := manifest.GetApplicationProperty(0, "name")
	if err != nil {
		return "", err
	}
	// The manifest property is untyped; insist on a non-empty string.
	name, ok := appName.(string)
	if !ok {
		return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
	}
	if len(name) == 0 {
		return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
	}
	return name, nil
}
// handleSmokeTestScript makes the configured smoke-test script executable and
// returns the "--smoke-test <absolute path>" parameters for the
// blue-green-deploy plugin. When the default script name is configured, the
// built-in default script is (over)written first. An empty script name yields
// no parameters.
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
	if smokeTestScript == "blueGreenCheckScript.sh" {
		// what should we do if there is already a script with the given name? Should we really overwrite ...
		if err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755); err != nil {
			return []string{}, fmt.Errorf("failed to write default smoke-test script: %w", err)
		}
		log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
	}
	if smokeTestScript == "" {
		return []string{}, nil
	}
	if err := fileUtils.Chmod(smokeTestScript, 0755); err != nil {
		return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
	}
	pwd, err := fileUtils.Getwd()
	if err != nil {
		return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
	}
	// The plugin needs an absolute path since it may run from elsewhere.
	return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
// prepareBlueGreenCfNativeDeploy prepares a blue-green deployment: it sets up
// the smoke-test parameters, performs variable substitution on the manifest
// (the blue-green-deploy plugin has no native vars support) and transforms
// legacy manifest formats. Returns the deploy command, its options and the
// smoke-test parameters.
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
	if err != nil {
		return "", []string{}, []string{}, err
	}

	var deployOptions = []string{}
	if !config.KeepOldInstance {
		deployOptions = append(deployOptions, "--delete-old-apps")
	}

	manifestFile, err := getManifestFileName(config)
	// Fix: this error was previously overwritten unchecked by the next call.
	if err != nil {
		return "", []string{}, []string{}, err
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}

	if !manifestFileExists {
		log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
	} else {
		manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
		}
		manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
		}
		// Substitute variables in place; the plugin cannot do it itself.
		modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
		}
		if modified {
			log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
		} else {
			log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
		}
		err = handleLegacyCfManifest(manifestFile)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
		}
	}
	return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles: in case the only provided file is 'manifest-variables.yml' and this file does not
// exist we ignore that file. For any other file there is no check if that file exists. In case several files are
// provided we also do not check for the default file 'manifest-variables.yml'
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
	const defaultManifestVariableFileName = "manifest-variables.yml"

	onlyDefault := len(manifestVariablesFiles) == 1 && manifestVariablesFiles[0] == defaultManifestVariableFileName
	if !onlyDefault {
		return manifestVariablesFiles, nil
	}

	// Only the default file is present — most likely nothing was configured
	// and we just see the default value. Drop it silently if it is missing.
	exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
	if err != nil {
		return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
	}
	if exists {
		return manifestVariablesFiles, nil
	}
	return []string{}, nil
}
// toParameterMap parses "key=value" entries into an ordered map, preserving
// the order in which the parameters were provided. The entry is split at the
// FIRST '=' only, so values may themselves contain '=' (e.g. passwords or
// base64 payloads); previously such entries were rejected.
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
	parameterMap := orderedmap.NewOrderedMap()
	for _, p := range parameters {
		keyVal := strings.SplitN(p, "=", 2)
		if len(keyVal) != 2 {
			return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", p)
		}
		parameterMap.Set(keyVal[0], keyVal[1])
	}
	return parameterMap, nil
}
// handleLegacyCfManifest transforms a legacy-format cf manifest into the
// current format and writes it back; an already up-to-date manifest is left
// untouched.
func handleLegacyCfManifest(manifestFile string) error {
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return err
	}
	if err := manifest.Transform(); err != nil {
		return err
	}
	if !manifest.IsModified() {
		// Nothing changed, nothing to persist.
		log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
		return nil
	}
	if err := manifest.WriteManifest(); err != nil {
		return err
	}
	log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
	return nil
}
// prepareCfPushCfNativeDeploy prepares a standard "cf push" deployment:
// manifest variables and variables files are translated into --var/--vars-file
// options. Missing vars files are skipped with a warning instead of failing.
// Returns the deploy command, its options and (always empty) smoke-test
// parameters.
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	deployOptions := []string{}
	varOptions, err := _getVarsOptions(config.ManifestVariables)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
	}

	varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
	if err != nil {
		if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
			for _, missingVarFile := range e.MissingFiles {
				// Fix: the message wrongly referred to "cf create-service-push"
				// (copy-paste from another step); this is the cf push path.
				log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf push call", missingVarFile)
			}
		} else {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
		}
	}

	deployOptions = append(deployOptions, varOptions...)
	deployOptions = append(deployOptions, varFileOptions...)

	return "push", deployOptions, []string{}, nil
}
// toStringInterfaceMap converts an ordered map into a plain map with string
// keys. The error parameter lets the function be chained directly around
// toParameterMap: a non-nil incoming error is passed through together with an
// empty (non-nil) map.
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
	result := map[string]interface{}{}
	if err != nil {
		return result, err
	}
	for _, key := range in.Keys() {
		name, ok := key.(string)
		if !ok {
			return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
		}
		value, exists := in.Get(key)
		if !exists {
			return nil, fmt.Errorf("No entry found for '%v'", key)
		}
		result[name] = value
	}
	return result, nil
}
// checkAndUpdateDeployTypeForNotSupportedManifest returns the effective deploy
// type. A blue-green deployment is downgraded to "standard" when the (single)
// manifest application declares "no-route", since blue-green deployment is
// impossible without a route. Manifests with more than one application are
// rejected for blue-green.
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
	manifestFile, err := getManifestFileName(config)
	// Fix: this error was previously overwritten unchecked by the next call.
	if err != nil {
		return "", err
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", err
	}
	if config.DeployType == "blue-green" && manifestFileExists {
		manifest, err := _getManifest(manifestFile)
		// Fix: this error was previously discarded ('_'), so a broken
		// manifest caused confusing follow-up failures instead of a clear one.
		if err != nil {
			return "", err
		}
		apps, err := manifest.GetApplications()
		if err != nil {
			return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
		}
		if len(apps) > 1 {
			return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
		}
		hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
		if err != nil {
			return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
		}
		if len(apps) == 1 && hasNoRouteProperty {
			const deployTypeStandard = "standard"
			log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
			return deployTypeStandard, nil
		}
	}
	return config.DeployType, nil
}
// deployMta deploys an mtar file via the cf CLI multiapps plugin ("deploy" or,
// for blue-green, "bg-deploy --no-confirm"). Extension descriptor files are
// backed up, have their credential placeholders resolved, and are restored to
// their original content after the deployment.
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
	deployCommand := "deploy"
	deployParams := []string{}

	if len(config.MtaDeployParameters) > 0 {
		// Fix: strings.Fields instead of Split(…, " ") so that repeated or
		// stray whitespace does not yield empty CLI arguments; consistent with
		// the CfNativeDeployParameters handling.
		deployParams = append(deployParams, strings.Fields(config.MtaDeployParameters)...)
	}

	if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
		deployCommand = "bg-deploy"
		const noConfirmFlag = "--no-confirm"
		if !piperutils.ContainsString(deployParams, noConfirmFlag) {
			deployParams = append(deployParams, noConfirmFlag)
		}
	}

	cfDeployParams := []string{
		deployCommand,
		mtarFilePath,
	}

	if len(deployParams) > 0 {
		cfDeployParams = append(cfDeployParams, deployParams...)
	}

	extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)

	for _, extFile := range extFiles {
		// Keep a pristine copy so credentials never persist in the workspace.
		_, err := fileUtils.Copy(extFile, extFile+".original")
		if err != nil {
			return fmt.Errorf("Cannot prepare mta extension files: %w", err)
		}
		_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
		if err != nil {
			return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
		}
	}

	cfDeployParams = append(cfDeployParams, extFileParams...)

	err := cfDeploy(config, cfDeployParams, nil, nil, command)

	// Restore the original extension files even when the deployment failed;
	// a restore failure is only surfaced if the deployment itself succeeded.
	for _, extFile := range extFiles {
		renameError := fileUtils.FileRename(extFile+".original", extFile)
		if err == nil && renameError != nil {
			return renameError
		}
	}

	return err
}
// handleMtaExtensionCredentials replaces "<%= name %>" placeholders in the
// given mta extension file with credential values taken from environment
// variables. The credentials map associates placeholder names with credential
// keys; the environment variable looked up is the key converted via
// toEnvVarKey. Returns whether the file was updated and whether unresolved
// placeholders remain afterwards.
func handleMtaExtensionCredentials(extFile string, credentials map[string]interface{}) (updated, containsUnresolved bool, err error) {
	log.Entry().Debugf("Inserting credentials into extension file '%s'", extFile)

	b, err := fileUtils.FileRead(extFile)
	if err != nil {
		return false, false, errors.Wrapf(err, "Cannot handle credentials for mta extension file '%s'", extFile)
	}
	content := string(b)

	env, err := toMap(_environ(), "=")
	if err != nil {
		return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
	}

	missingCredentials := []string{}
	for name, credentialKey := range credentials {
		credKey, ok := credentialKey.(string)
		if !ok {
			return false, false, fmt.Errorf("cannot handle mta extension credentials: Cannot cast '%v' (type %T) to string", credentialKey, credentialKey)
		}
		// The name is interpolated into a regex below; restrict it so no
		// regex meta characters can sneak in.
		const allowedVariableNamePattern = "^[-_A-Za-z0-9]+$"
		alphaNumOnly := regexp.MustCompile(allowedVariableNamePattern)
		if !alphaNumOnly.MatchString(name) {
			return false, false, fmt.Errorf("credential key name '%s' contains unsupported character. Must contain only %s", name, allowedVariableNamePattern)
		}
		pattern := regexp.MustCompile("<%=\\s*" + name + "\\s*%>")
		if pattern.MatchString(content) {
			cred := env[toEnvVarKey(credKey)]
			if len(cred) == 0 {
				missingCredentials = append(missingCredentials, credKey)
				continue
			}
			content = pattern.ReplaceAllLiteralString(content, cred)
			updated = true
			log.Entry().Debugf("Mta extension credentials handling: Placeholder '%s' has been replaced by credential denoted by '%s'/'%s' in file '%s'", name, credKey, toEnvVarKey(credKey), extFile)
		} else {
			log.Entry().Debugf("Mta extension credentials handling: Variable '%s' is not used in file '%s'", name, extFile)
		}
	}
	if len(missingCredentials) > 0 {
		missinCredsEnvVarKeyCompatible := []string{}
		for _, missingKey := range missingCredentials {
			missinCredsEnvVarKeyCompatible = append(missinCredsEnvVarKeyCompatible, toEnvVarKey(missingKey))
		}
		// ensure stable order of the entries. Needed e.g. for the tests.
		sort.Strings(missingCredentials)
		sort.Strings(missinCredsEnvVarKeyCompatible)
		return false, false, fmt.Errorf("cannot handle mta extension credentials: No credentials found for '%s'/'%s'. Are these credentials maintained?", missingCredentials, missinCredsEnvVarKeyCompatible)
	}
	if !updated {
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has not been updated. Seems to contain no credentials.", extFile)
	} else {
		fInfo, err := fileUtils.Stat(extFile)
		// Fix: the error was previously checked AFTER calling fInfo.Mode(),
		// which dereferences a nil FileInfo when Stat fails.
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		fMode := fInfo.Mode()
		// Preserve the original file permissions when writing back.
		err = fileUtils.FileWrite(extFile, []byte(content), fMode)
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has been updated.", extFile)
	}

	re := regexp.MustCompile(`<%=.+%>`)
	placeholders := re.FindAll([]byte(content), -1)
	containsUnresolved = (len(placeholders) > 0)
	if containsUnresolved {
		log.Entry().Warningf("mta extension credential handling: Unresolved placeholders found after inserting credentials: %s", placeholders)
	}

	return updated, containsUnresolved, nil
}
// toEnvVarKey converts a credential key into the corresponding environment
// variable name: non-alphanumeric characters become '_', camelCase boundaries
// get an extra '_', and the result is upper-cased
// (e.g. "myCred.Key" -> "MY_CRED_KEY").
func toEnvVarKey(key string) string {
	nonAlphaNum := regexp.MustCompile(`[^A-Za-z0-9]`)
	camelBoundary := regexp.MustCompile(`([a-z0-9])([A-Z])`)

	sanitized := nonAlphaNum.ReplaceAllString(key, "_")
	snake := camelBoundary.ReplaceAllString(sanitized, "${1}_${2}")
	return strings.ToUpper(snake)
}
// toMap converts "key<sep>value" entries (e.g. the output of os.Environ())
// into a map. Each entry is split at the FIRST occurrence of the separator;
// later occurrences remain part of the value. An entry without the separator
// yields an error.
// Note: the function identifier was missing in the source ("func | (...");
// restored as toMap, matching the call site in handleMtaExtensionCredentials.
func toMap(keyValue []string, separator string) (map[string]string, error) {
	result := map[string]string{}
	for _, entry := range keyValue {
		kv := strings.Split(entry, separator)
		if len(kv) < 2 {
			return map[string]string{}, fmt.Errorf("Cannot convert to map: separator '%s' not found in entry '%s'", separator, entry)
		}
		result[kv[0]] = strings.Join(kv[1:], separator)
	}
	return result, nil
}
// handleMtaExtensionDescriptors parses the space-separated
// mtaExtensionDescriptor configuration value. It returns the cf CLI
// parameters ("-e" followed by a comma-separated file list) and the plain
// list of extension files. Stray "-e" tokens in the input are tolerated.
func handleMtaExtensionDescriptors(mtaExtensionDescriptor string) ([]string, []string) {
	extFiles := []string{}
	for _, token := range strings.Fields(strings.Trim(mtaExtensionDescriptor, " ")) {
		// Only collect actual file names, dropping flag tokens.
		if token == "-e" || token == "" {
			continue
		}
		// REVISIT: maybe check if the extension descriptor exists
		extFiles = append(extFiles, token)
	}

	params := []string{}
	if len(extFiles) > 0 {
		params = append(params, "-e", strings.Join(extFiles, ","))
	}
	return params, extFiles
}
// cfDeploy runs the full cf CLI interaction: version check, login, "cf
// plugins", the actual deploy command, an optional post-deploy action, and a
// logout (always attempted once the login succeeded). CF_TRACE is routed to a
// log file which is dumped on failure or in verbose mode.
func cfDeploy(
	config *cloudFoundryDeployOptions,
	cfDeployParams []string,
	additionalEnvironment []string,
	postDeployAction func(command command.ExecRunner) error,
	command command.ExecRunner) error {

	const cfLogFile = "cf.log"
	var err error
	var loginPerformed bool

	additionalEnvironment = append(additionalEnvironment, "CF_TRACE="+cfLogFile)

	if len(config.CfHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_HOME="+config.CfHome)
	}

	if len(config.CfPluginHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_PLUGIN_HOME="+config.CfPluginHome)
	}

	log.Entry().Infof("Using additional environment variables: %s", additionalEnvironment)

	// TODO set HOME to config.DockerWorkspace
	command.SetEnv(additionalEnvironment)

	err = command.RunExecutable("cf", "version")

	if err == nil {
		err = _cfLogin(command, cloudfoundry.LoginOptions{
			CfAPIEndpoint: config.APIEndpoint,
			CfOrg:         config.Org,
			CfSpace:       config.Space,
			Username:      config.Username,
			Password:      config.Password,
			CfLoginOpts:   strings.Fields(config.LoginParameters),
		})
	}

	if err == nil {
		loginPerformed = true
		err = command.RunExecutable("cf", []string{"plugins"}...)
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", []string{"plugins"})
		}
	}

	if err == nil {
		err = command.RunExecutable("cf", cfDeployParams...)
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", cfDeployParams)
		}
	}

	if err == nil && postDeployAction != nil {
		err = postDeployAction(command)
	}

	if loginPerformed {
		// Always log out after a successful login; a logout failure is only
		// surfaced if nothing else failed before.
		logoutErr := _cfLogout(command)
		if logoutErr != nil {
			log.Entry().WithError(logoutErr).Errorf("Cannot perform cf logout")
			if err == nil {
				err = logoutErr
			}
		}
	}

	if err != nil || GeneralConfig.Verbose {
		e := handleCfCliLog(cfLogFile)
		if e != nil {
			// Fix: previously logged WithError(err) — the deploy error —
			// instead of the actual log-read error 'e'.
			log.Entry().WithError(e).Errorf("Error reading cf log file '%s'.", cfLogFile)
		}
	}

	return err
}
// findMtar searches the workspace for mtar files. Exactly one match is
// required: zero or multiple matches yield an error (multiple matches must be
// disambiguated via the 'mtarPath' parameter).
func findMtar() (string, error) {
	const pattern = "**/*.mtar"

	mtars, err := fileUtils.Glob(pattern)
	if err != nil {
		return "", err
	}

	switch len(mtars) {
	case 0:
		return "", fmt.Errorf("No mtar file matching pattern '%s' found", pattern)
	case 1:
		return mtars[0], nil
	default:
		return "", fmt.Errorf("Found multiple mtar files matching pattern '%s' (%s), please specify file via parameter 'mtarPath'", pattern, strings.Join(mtars, ","))
	}
}
// handleCfCliLog dumps the cf CLI trace file (CF_TRACE output) line by line
// into the step log, framed by start/end markers. A missing trace file only
// produces a warning. Returns an error for file-access problems; the read
// loop's EOF is not treated as an error.
func handleCfCliLog(logFile string) error {
fExists, err := fileUtils.FileExists(logFile)
if err != nil {
return err
}
log.Entry().Info("### START OF CF CLI TRACE OUTPUT ###")
if fExists {
// note: 'err' below is shadowed inside this branch and the loop; the
// final 'return err' returns the (nil) outer error from FileExists.
f, err := os.Open(logFile)
if err != nil {
return err
}
defer f.Close()
bReader := bufio.NewReader(f)
for {
// ReadString returns the partial last line together with io.EOF, so
// the EOF case still logs the remaining content before breaking.
line, err := bReader.ReadString('\n')
if err == nil || err == io.EOF {
// maybe inappropriate to log as info. Maybe the line from the
// log indicates an error, but that is something like a project
// standard.
log.Entry().Info(strings.TrimSuffix(line, "\n"))
}
if err != nil {
break
}
}
} else {
log.Entry().Warningf("No trace file found at '%s'", logFile)
}
log.Entry().Info("### END OF CF CLI TRACE OUTPUT ###")
return err
}
| toMap | identifier_name |
cloudFoundryDeploy.go | package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/SAP/jenkins-library/pkg/cloudfoundry"
"github.com/SAP/jenkins-library/pkg/command"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/yaml"
"github.com/elliotchance/orderedmap"
"github.com/pkg/errors"
)
// cfFileUtil abstracts all file-system access used by this step so it can be
// mocked in tests; the production implementation is piperutils.Files (see the
// fileUtils package variable below).
type cfFileUtil interface {
FileExists(string) (bool, error)
FileRename(string, string) error
FileRead(string) ([]byte, error)
FileWrite(path string, content []byte, perm os.FileMode) error
Getwd() (string, error)
Glob(string) ([]string, error)
Chmod(string, os.FileMode) error
Copy(string, string) (int64, error)
Stat(path string) (os.FileInfo, error)
}
// Test seams: production code uses the real implementations assigned below;
// unit tests overwrite these package-level variables with mocks.
var _now = time.Now
var _cfLogin = cfLogin
var _cfLogout = cfLogout
var _getManifest = getManifest
var _replaceVariables = yaml.Substitute
var _getVarsOptions = cloudfoundry.GetVarsOptions
var _getVarsFileOptions = cloudfoundry.GetVarsFileOptions
var _environ = os.Environ
// fileUtils is the file-system abstraction used throughout this step;
// replaced by a mock in tests.
var fileUtils cfFileUtil = piperutils.Files{}
// cfLogin performs a cf CLI login through CFUtils. It exists as a free
// function (rather than a direct CFUtils call) to simplify mocking via the
// _cfLogin package variable.
func cfLogin(c command.ExecRunner, options cloudfoundry.LoginOptions) error {
	utils := cloudfoundry.CFUtils{Exec: c}
	return utils.Login(options)
}
// cfLogout performs a cf CLI logout through CFUtils. It exists as a free
// function (rather than a direct CFUtils call) to simplify mocking via the
// _cfLogout package variable.
func cfLogout(c command.ExecRunner) error {
	utils := cloudfoundry.CFUtils{Exec: c}
	return utils.Logout()
}
// defaultSmokeTestScript is written to disk as "blueGreenCheckScript.sh" when
// that default script name is configured (see handleSmokeTestScript); it
// checks that the application root answers with the HTTP status code given in
// the STATUS_CODE environment variable.
const defaultSmokeTestScript = `#!/usr/bin/env bash
# this is simply testing if the application root returns HTTP STATUS_CODE
curl -so /dev/null -w '%{response_code}' https://$1 | grep $STATUS_CODE`
// cloudFoundryDeploy is the step entry point: it wires up a command runner
// whose output is routed through the piper logging framework and delegates to
// runCloudFoundryDeploy. Any returned error is fatal and terminates the
// process with exit code 1 (via log.Fatalf).
func cloudFoundryDeploy(config cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux) {
	runner := command.Command{}
	// Reroute command output to the logging framework instead of raw stdio.
	runner.Stdout(log.Writer())
	runner.Stderr(log.Writer())
	// for http calls import piperhttp "github.com/SAP/jenkins-library/pkg/http"
	// and use a &piperhttp.Client{} in a custom system
	// Example: step checkmarxExecuteScan.go
	if err := runCloudFoundryDeploy(&config, telemetryData, influxData, &runner); err != nil {
		// Fatal ends with os.Exit(1), the expected failure mode for steps.
		log.Entry().WithError(err).Fatalf("step execution failed: %s", err)
	}
}
// runCloudFoundryDeploy dispatches to the MTA or cf-native deployment flow
// based on config.DeployTool and, whenever a deployment was actually
// attempted, records the outcome in the influx data.
func runCloudFoundryDeploy(config *cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux, command command.ExecRunner) error {
	log.Entry().Infof("General parameters: deployTool='%s', deployType='%s', cfApiEndpoint='%s', cfOrg='%s', cfSpace='%s'",
		config.DeployTool, config.DeployType, config.APIEndpoint, config.Org, config.Space)

	if err := validateAppName(config.AppName); err != nil {
		return err
	}
	validateDeployTool(config)

	deployTriggered := false
	var err error
	switch config.DeployTool {
	case "mtaDeployPlugin":
		deployTriggered = true
		err = handleMTADeployment(config, command)
	case "cf_native":
		deployTriggered = true
		err = handleCFNativeDeployment(config, command)
	default:
		// Unknown tool: warn and skip, but do not fail the step.
		log.Entry().Warningf("Found unsupported deployTool ('%s'). Skipping deployment. Supported deploy tools: 'mtaDeployPlugin', 'cf_native'", config.DeployTool)
	}

	if deployTriggered {
		// Report SUCCESS/FAILURE only for deployments that actually ran.
		prepareInflux(err == nil, config, influxData)
	}
	return err
}
// validateDeployTool derives config.DeployTool from config.BuildTool when the
// former is unset: buildTool "mta" maps to the mta deploy plugin, everything
// else to cf_native. An explicitly configured deployTool is never overridden.
func validateDeployTool(config *cloudFoundryDeployOptions) {
	// Nothing to derive if deployTool is already set or buildTool is unknown.
	if config.DeployTool != "" || config.BuildTool == "" {
		return
	}
	if config.BuildTool == "mta" {
		config.DeployTool = "mtaDeployPlugin"
	} else {
		config.DeployTool = "cf_native"
	}
	log.Entry().Infof("Parameter deployTool not specified - deriving from buildTool '%s': '%s'",
		config.BuildTool, config.DeployTool)
}
// validateAppName checks the configured application name against the
// CloudFoundry naming rules (letters, digits, dashes; no leading/trailing
// dash; the empty string is accepted). Underscores or edge dashes yield an
// error; other non-alphanumeric characters only produce a warning.
func validateAppName(appName string) error {
	// for the sake of brevity we consider the empty string as valid app name here
	isValidAppName, err := regexp.MatchString("^$|^[a-zA-Z0-9]$|^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$", appName)
	if err != nil {
		return err
	}
	if isValidAppName {
		return nil
	}
	const (
		underscore = "_"
		dash       = "-"
		docuLink   = "https://docs.cloudfoundry.org/devguide/deploy-apps/deploy-app.html#basic-settings"
	)
	log.Entry().Warningf("Your application name '%s' contains non-alphanumeric characters which may lead to errors in the future, "+
		"as they are not supported by CloudFoundry. For more details please visit %s", appName, docuLink)
	var fail bool
	message := []string{fmt.Sprintf("Your application name '%s'", appName)}
	if strings.Contains(appName, underscore) {
		message = append(message, fmt.Sprintf("contains a '%s' (underscore) which is not allowed, only letters, dashes and numbers can be used.", underscore))
		fail = true
	}
	if strings.HasPrefix(appName, dash) || strings.HasSuffix(appName, dash) {
		message = append(message, fmt.Sprintf("starts or ends with a '%s' (dash) which is not allowed, only letters and numbers can be used.", dash))
		fail = true
	}
	message = append(message, fmt.Sprintf("Please change the name to fit this requirement(s). For more details please visit %s.", docuLink))
	if fail {
		// Fix: the message contains the user-controlled app name, so it must
		// not be used as a format string (fmt.Errorf(s) would misinterpret
		// any '%' in the name). errors.New takes it verbatim.
		return errors.New(strings.Join(message, " "))
	}
	return nil
}
// prepareInflux records the outcome of a deployment in the influx data set.
// A nil influxData (influx reporting not configured) is silently ignored.
func prepareInflux(success bool, config *cloudFoundryDeployOptions, influxData *cloudFoundryDeployInflux) {
	if influxData == nil {
		return
	}
	var result string
	if success {
		result = "SUCCESS"
	} else {
		result = "FAILURE"
	}
	tags := &influxData.deployment_data.tags
	tags.artifactVersion = config.ArtifactVersion
	tags.deployUser = config.Username
	tags.deployResult = result
	tags.cfAPIEndpoint = config.APIEndpoint
	tags.cfOrg = config.Org
	tags.cfSpace = config.Space
	fields := &influxData.deployment_data.fields
	// n/a (literally) is also reported in groovy
	fields.artifactURL = "n/a"
	fields.commitHash = config.CommitHash
	fields.deployTime = strings.ToUpper(_now().Format("Jan 02 2006 15:04:05"))
	// we should discuss how we handle the job trigger
	// 1.) outside Jenkins
	// 2.) inside Jenkins (how to get)
	fields.jobTrigger = "n/a"
}
// handleMTADeployment resolves the mtar file to deploy - either from the
// mtaPath configuration or, when unset, by globbing the workspace - and
// triggers the MTA deployment with it.
func handleMTADeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	mtarFilePath := config.MtaPath
	if len(mtarFilePath) > 0 {
		exists, err := fileUtils.FileExists(mtarFilePath)
		if err != nil {
			return errors.Wrapf(err, "Cannot check if file path '%s' exists", mtarFilePath)
		}
		if !exists {
			return fmt.Errorf("mtar file '%s' retrieved from configuration does not exist", mtarFilePath)
		}
		log.Entry().Debugf("Using mtar file '%s' from configuration", mtarFilePath)
	} else {
		found, err := findMtar()
		if err != nil {
			return err
		}
		mtarFilePath = found
		log.Entry().Debugf("Using mtar file '%s' found in workspace", mtarFilePath)
	}
	return deployMta(config, mtarFilePath, command)
}
// deployConfig bundles the parameters for a single cf-native deployment call.
type deployConfig struct {
	DeployCommand   string   // cf sub-command, e.g. "push" or "blue-green-deploy"
	DeployOptions   []string // additional options appended after the app name
	AppName         string   // application name; may be empty
	ManifestFile    string   // manifest passed via '-f' when non-empty
	SmokeTestScript []string // smoke-test fragment, e.g. ["--smoke-test", "<path>"]
}
// handleCFNativeDeployment prepares and runs a cf-native deployment
// ('standard' cf push or 'blue-green'). The deploy command, its options and
// an optional smoke-test fragment come from the type-specific prepare
// functions; afterwards the assembled deploy configuration is executed.
func handleCFNativeDeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	deployType, err := checkAndUpdateDeployTypeForNotSupportedManifest(config)
	if err != nil {
		return err
	}
	var deployCommand string
	var smokeTestScript []string
	var deployOptions []string
	// deploy command will be provided by the prepare functions below
	if deployType == "blue-green" {
		deployCommand, deployOptions, smokeTestScript, err = prepareBlueGreenCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf native deployment. DeployType '%s'", deployType)
		}
	} else if deployType == "standard" {
		deployCommand, deployOptions, smokeTestScript, err = prepareCfPushCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf push native deployment. DeployType '%s'", deployType)
		}
	} else {
		return fmt.Errorf("Invalid deploy type received: '%s'. Supported values: %v", deployType, []string{"blue-green", "standard"})
	}
	appName, err := getAppName(config)
	if err != nil {
		return err
	}
	manifestFile, err := getManifestFileName(config)
	// fix: this error was silently dropped before; check it like every other call
	if err != nil {
		return err
	}
	log.Entry().Infof("CF native deployment ('%s') with:", config.DeployType)
	log.Entry().Infof("cfAppName='%s'", appName)
	log.Entry().Infof("cfManifest='%s'", manifestFile)
	log.Entry().Infof("cfManifestVariables: '%v'", config.ManifestVariables)
	log.Entry().Infof("cfManifestVariablesFiles: '%v'", config.ManifestVariablesFiles)
	log.Entry().Infof("cfdeployDockerImage: '%s'", config.DeployDockerImage)
	log.Entry().Infof("smokeTestScript: '%s'", config.SmokeTestScript)
	additionalEnvironment := []string{
		"STATUS_CODE=" + strconv.FormatInt(int64(config.SmokeTestStatusCode), 10),
	}
	if len(config.DockerPassword) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_DOCKER_PASSWORD="+config.DockerPassword)
	}
	// NOTE(review): AppName/ManifestFile are taken from the raw config here,
	// not from the derived appName/manifestFile above (which are only logged)
	// - confirm this asymmetry is intended before changing it.
	myDeployConfig := deployConfig{
		DeployCommand:   deployCommand,
		DeployOptions:   deployOptions,
		AppName:         config.AppName,
		ManifestFile:    config.Manifest,
		SmokeTestScript: smokeTestScript,
	}
	log.Entry().Infof("DeployConfig: %v", myDeployConfig)
	return deployCfNative(myDeployConfig, config, additionalEnvironment, command)
}
// deployCfNative assembles the final cf command line from the prepared deploy
// configuration and runs it via cfDeploy. The argument order is significant
// for the cf CLI:
//
//	<command> [appName] [options...] [-f manifest] [--docker-image ...]
//	[--docker-username ...] [smoke-test params...] [extra params...]
//
// For blue-green deployments with keepOldInstance configured, a post-deploy
// action stops the old application instance ("<app>-old").
func deployCfNative(deployConfig deployConfig, config *cloudFoundryDeployOptions, additionalEnvironment []string, cmd command.ExecRunner) error {
	deployStatement := []string{
		deployConfig.DeployCommand,
	}
	if len(deployConfig.AppName) > 0 {
		deployStatement = append(deployStatement, deployConfig.AppName)
	}
	if len(deployConfig.DeployOptions) > 0 {
		deployStatement = append(deployStatement, deployConfig.DeployOptions...)
	}
	if len(deployConfig.ManifestFile) > 0 {
		deployStatement = append(deployStatement, "-f")
		deployStatement = append(deployStatement, deployConfig.ManifestFile)
	}
	// docker options are only added for non-blue-green deployments
	if len(config.DeployDockerImage) > 0 && config.DeployType != "blue-green" {
		deployStatement = append(deployStatement, "--docker-image", config.DeployDockerImage)
	}
	if len(config.DockerUsername) > 0 && config.DeployType != "blue-green" {
		deployStatement = append(deployStatement, "--docker-username", config.DockerUsername)
	}
	if len(deployConfig.SmokeTestScript) > 0 {
		deployStatement = append(deployStatement, deployConfig.SmokeTestScript...)
	}
	if len(config.CfNativeDeployParameters) > 0 {
		deployStatement = append(deployStatement, strings.Fields(config.CfNativeDeployParameters)...)
	}
	// Post-deploy action: stop the old blue-green instance when requested.
	// 'cf stop' output is captured into a buffer so a "not found" message can
	// be told apart from a real failure; stdout is restored afterwards.
	stopOldAppIfRunning := func(_cmd command.ExecRunner) error {
		if config.KeepOldInstance && config.DeployType == "blue-green" {
			oldAppName := deployConfig.AppName + "-old"
			var buff bytes.Buffer
			_cmd.Stdout(&buff)
			defer func() {
				_cmd.Stdout(log.Writer())
			}()
			err := _cmd.RunExecutable("cf", "stop", oldAppName)
			if err != nil {
				cfStopLog := buff.String()
				// "<app> not found" is benign - there simply is no old instance
				if !strings.Contains(cfStopLog, oldAppName+" not found") {
					return fmt.Errorf("Could not stop application '%s'. Error: %s", oldAppName, cfStopLog)
				}
				log.Entry().Infof("Cannot stop application '%s' since this appliation was not found.", oldAppName)
			} else {
				log.Entry().Infof("Old application '%s' has been stopped.", oldAppName)
			}
		}
		return nil
	}
	return cfDeploy(config, deployStatement, additionalEnvironment, stopOldAppIfRunning, cmd)
}
// getManifest reads and parses the given manifest file.
// Exposed through the _getManifest variable so tests can mock it.
func getManifest(name string) (cloudfoundry.Manifest, error) {
	return cloudfoundry.ReadManifest(name)
}
// getManifestFileName returns the configured manifest file name, falling back
// to the default "manifest.yml" when none is configured. The error return is
// part of the contract for future use; currently it is always nil.
func getManifestFileName(config *cloudFoundryDeployOptions) (string, error) {
	if len(config.Manifest) == 0 {
		return "manifest.yml", nil
	}
	return config.Manifest, nil
}
// getAppName determines the application name for a cf-native deployment:
//  1. from the configuration, when provided,
//  2. otherwise from the first application entry in the manifest file.
//
// For blue-green deployments an explicit app name is mandatory (limitation
// of the blue-green-deploy plugin).
func getAppName(config *cloudFoundryDeployOptions) (string, error) {
	if len(config.AppName) > 0 {
		return config.AppName, nil
	}
	if config.DeployType == "blue-green" {
		return "", fmt.Errorf("Blue-green plugin requires app name to be passed (see https://github.com/bluemixgaragelondon/cf-blue-green-deploy/issues/27)")
	}
	manifestFile, err := getManifestFileName(config)
	// fix: this error was previously overwritten unchecked by the next call
	if err != nil {
		return "", err
	}
	fileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}
	if !fileExists {
		return "", fmt.Errorf("Manifest file '%s' not found. Cannot retrieve app name", manifestFile)
	}
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return "", err
	}
	apps, err := manifest.GetApplications()
	if err != nil {
		return "", err
	}
	if len(apps) == 0 {
		return "", fmt.Errorf("No apps declared in manifest '%s'", manifestFile)
	}
	namePropertyExists, err := manifest.ApplicationHasProperty(0, "name")
	if err != nil {
		return "", err
	}
	if !namePropertyExists {
		return "", fmt.Errorf("No appName available in manifest '%s'", manifestFile)
	}
	appName, err := manifest.GetApplicationProperty(0, "name")
	if err != nil {
		return "", err
	}
	name, ok := appName.(string)
	if !ok {
		return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
	}
	if len(name) == 0 {
		return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
	}
	return name, nil
}
// handleSmokeTestScript prepares the smoke-test script for a blue-green
// deployment. The well-known name "blueGreenCheckScript.sh" triggers writing
// the built-in default script first. A non-empty script is made executable
// and returned as "--smoke-test <absolute path>" parameters; an empty script
// name yields no parameters.
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
	none := []string{}
	if len(smokeTestScript) == 0 {
		return none, nil
	}
	if smokeTestScript == "blueGreenCheckScript.sh" {
		// what should we do if there is already a script with the given name? Should we really overwrite ...
		if err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755); err != nil {
			return none, fmt.Errorf("failed to write default smoke-test script: %w", err)
		}
		log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
	}
	if err := fileUtils.Chmod(smokeTestScript, 0755); err != nil {
		return none, fmt.Errorf("failed to make smoke-test script executable: %w", err)
	}
	pwd, err := fileUtils.Getwd()
	if err != nil {
		return none, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
	}
	return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
// prepareBlueGreenCfNativeDeploy assembles the blue-green-deploy invocation:
// it provides the smoke-test parameters, decides whether old apps are deleted
// (--delete-old-apps unless keepOldInstance is set) and - when a manifest
// exists - performs variable substitution and legacy-manifest transformation.
// Returns (deployCommand, deployOptions, smokeTestParams, error).
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
	if err != nil {
		return "", []string{}, []string{}, err
	}
	var deployOptions = []string{}
	if !config.KeepOldInstance {
		deployOptions = append(deployOptions, "--delete-old-apps")
	}
	manifestFile, err := getManifestFileName(config)
	// fix: this error was previously overwritten unchecked by the next call
	if err != nil {
		return "", []string{}, []string{}, err
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
	}
	if !manifestFileExists {
		log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
	} else {
		manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
		}
		manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
		}
		modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
		}
		if modified {
			log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
		} else {
			log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
		}
		err = handleLegacyCfManifest(manifestFile)
		if err != nil {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
		}
	}
	return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles filters the manifest-variables file list:
// when the only entry is the default 'manifest-variables.yml' (most likely
// not explicitly configured) and that file does not exist, the entry is
// dropped. Any other file is passed through without an existence check; with
// several files the default file is not checked either.
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
	const defaultManifestVariableFileName = "manifest-variables.yml"
	onlyDefault := len(manifestVariablesFiles) == 1 && manifestVariablesFiles[0] == defaultManifestVariableFileName
	if !onlyDefault {
		return manifestVariablesFiles, nil
	}
	// we have only the default file. Most likely this is not configured, but
	// we simply have the default. In case this file does not exist, ignore it.
	exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
	if err != nil {
		return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
	}
	if exists {
		return manifestVariablesFiles, nil
	}
	return []string{}, nil
}
// toParameterMap parses a list of "<key>=<val>" strings into an ordered map,
// preserving the input order. An entry that does not split into exactly one
// key and one value is an error.
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
	parameterMap := orderedmap.NewOrderedMap()
	for _, parameter := range parameters {
		parts := strings.Split(parameter, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", parameter)
		}
		key, value := parts[0], parts[1]
		parameterMap.Set(key, value)
	}
	return parameterMap, nil
}
// handleLegacyCfManifest transforms a manifest in legacy format into the
// current format and persists it - but only when the transformation actually
// changed something.
func handleLegacyCfManifest(manifestFile string) error {
	manifest, err := _getManifest(manifestFile)
	if err != nil {
		return err
	}
	if err = manifest.Transform(); err != nil {
		return err
	}
	if !manifest.IsModified() {
		log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
		return nil
	}
	if err = manifest.WriteManifest(); err != nil {
		return err
	}
	log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
	return nil
}
// prepareCfPushCfNativeDeploy assembles the options for a plain 'cf push':
// '--var' options from manifestVariables and '--vars-file' options from
// manifestVariablesFiles. Missing vars-files are skipped with a warning; any
// other failure aborts. Returns (deployCommand, deployOptions,
// smokeTestParams, error).
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
	varOptions, err := _getVarsOptions(config.ManifestVariables)
	if err != nil {
		return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
	}
	varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
	if err != nil {
		e, isNotFound := err.(*cloudfoundry.VarsFilesNotFoundError)
		if !isNotFound {
			return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
		}
		for _, missingVarFile := range e.MissingFiles {
			log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
		}
	}
	deployOptions := append([]string{}, varOptions...)
	deployOptions = append(deployOptions, varFileOptions...)
	return "push", deployOptions, []string{}, nil
}
// toStringInterfaceMap converts an ordered map with string keys into a plain
// map[string]interface{}. The error parameter allows chaining with a fallible
// producer (e.g. toParameterMap); an incoming error is passed through and
// suppresses the conversion.
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
	out := map[string]interface{}{}
	if err != nil {
		return out, err
	}
	for _, key := range in.Keys() {
		k, ok := key.(string)
		if !ok {
			return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
		}
		val, exists := in.Get(key)
		if !exists {
			return nil, fmt.Errorf("No entry found for '%v'", key)
		}
		out[k] = val
	}
	return out, nil
}
// checkAndUpdateDeployTypeForNotSupportedManifest inspects the manifest for
// configurations the blue-green plugin cannot handle. Blue-green deployment
// of an application without a route ('no-route' property) is not possible, so
// in that case the deploy type is downgraded to 'standard' (cf push). A
// manifest with more than one application is rejected for blue-green
// deployments. Returns the (possibly adjusted) deploy type.
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
	manifestFile, err := getManifestFileName(config)
	// fix: this error was previously overwritten unchecked by the next call
	if err != nil {
		return "", err
	}
	manifestFileExists, err := fileUtils.FileExists(manifestFile)
	if err != nil {
		return "", err
	}
	if config.DeployType == "blue-green" && manifestFileExists {
		manifest, err := _getManifest(manifestFile)
		// fix: the read error was previously discarded with 'manifest, _ :=',
		// so a half-initialized manifest could be queried below
		if err != nil {
			return "", err
		}
		apps, err := manifest.GetApplications()
		if err != nil {
			return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
		}
		if len(apps) > 1 {
			return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
		}
		hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
		if err != nil {
			return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
		}
		if len(apps) == 1 && hasNoRouteProperty {
			const deployTypeStandard = "standard"
			log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
			return deployTypeStandard, nil
		}
	}
	return config.DeployType, nil
}
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
deployCommand := "deploy"
deployParams := []string{}
if len(config.MtaDeployParameters) > 0 {
deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...)
}
if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
deployCommand = "bg-deploy"
const noConfirmFlag = "--no-confirm"
if !piperutils.ContainsString(deployParams, noConfirmFlag) {
deployParams = append(deployParams, noConfirmFlag)
}
}
cfDeployParams := []string{
deployCommand,
mtarFilePath,
}
if len(deployParams) > 0 {
cfDeployParams = append(cfDeployParams, deployParams...)
}
extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)
for _, extFile := range extFiles {
_, err := fileUtils.Copy(extFile, extFile+".original")
if err != nil {
return fmt.Errorf("Cannot prepare mta extension files: %w", err)
}
_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
if err != nil |
}
cfDeployParams = append(cfDeployParams, extFileParams...)
err := cfDeploy(config, cfDeployParams, nil, nil, command)
for _, extFile := range extFiles {
renameError := fileUtils.FileRename(extFile+".original", extFile)
if err == nil && renameError != nil {
return renameError
}
}
return err
}
// handleMtaExtensionCredentials replaces credential placeholders of the form
// '<%= name %>' in the given extension file with values taken from the
// environment. The credentials map associates the placeholder name with a
// credential key; the key is translated to the corresponding environment
// variable name via toEnvVarKey. Returns whether the file was updated and
// whether unresolved placeholders remain afterwards.
func handleMtaExtensionCredentials(extFile string, credentials map[string]interface{}) (updated, containsUnresolved bool, err error) {
	log.Entry().Debugf("Inserting credentials into extension file '%s'", extFile)
	b, err := fileUtils.FileRead(extFile)
	if err != nil {
		return false, false, errors.Wrapf(err, "Cannot handle credentials for mta extension file '%s'", extFile)
	}
	content := string(b)
	env, err := toMap(_environ(), "=")
	if err != nil {
		return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
	}
	// fix: compiled once outside the loop (was recompiled per credential)
	const allowedVariableNamePattern = "^[-_A-Za-z0-9]+$"
	alphaNumOnly := regexp.MustCompile(allowedVariableNamePattern)
	missingCredentials := []string{}
	for name, credentialKey := range credentials {
		credKey, ok := credentialKey.(string)
		if !ok {
			return false, false, fmt.Errorf("cannot handle mta extension credentials: Cannot cast '%v' (type %T) to string", credentialKey, credentialKey)
		}
		if !alphaNumOnly.MatchString(name) {
			return false, false, fmt.Errorf("credential key name '%s' contains unsupported character. Must contain only %s", name, allowedVariableNamePattern)
		}
		pattern := regexp.MustCompile("<%=\\s*" + name + "\\s*%>")
		if pattern.MatchString(content) {
			cred := env[toEnvVarKey(credKey)]
			if len(cred) == 0 {
				missingCredentials = append(missingCredentials, credKey)
				continue
			}
			content = pattern.ReplaceAllLiteralString(content, cred)
			updated = true
			log.Entry().Debugf("Mta extension credentials handling: Placeholder '%s' has been replaced by credential denoted by '%s'/'%s' in file '%s'", name, credKey, toEnvVarKey(credKey), extFile)
		} else {
			log.Entry().Debugf("Mta extension credentials handling: Variable '%s' is not used in file '%s'", name, extFile)
		}
	}
	if len(missingCredentials) > 0 {
		missinCredsEnvVarKeyCompatible := []string{}
		for _, missingKey := range missingCredentials {
			missinCredsEnvVarKeyCompatible = append(missinCredsEnvVarKeyCompatible, toEnvVarKey(missingKey))
		}
		// ensure stable order of the entries. Needed e.g. for the tests.
		sort.Strings(missingCredentials)
		sort.Strings(missinCredsEnvVarKeyCompatible)
		return false, false, fmt.Errorf("cannot handle mta extension credentials: No credentials found for '%s'/'%s'. Are these credentials maintained?", missingCredentials, missinCredsEnvVarKeyCompatible)
	}
	if !updated {
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has not been updated. Seems to contain no credentials.", extFile)
	} else {
		fInfo, err := fileUtils.Stat(extFile)
		// fix: the error was checked only after fInfo.Mode() had already been
		// called, risking a nil FileInfo dereference on a Stat failure
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		fMode := fInfo.Mode()
		err = fileUtils.FileWrite(extFile, []byte(content), fMode)
		if err != nil {
			return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
		}
		log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has been updated.", extFile)
	}
	re := regexp.MustCompile(`<%=.+%>`)
	placeholders := re.FindAll([]byte(content), -1)
	containsUnresolved = (len(placeholders) > 0)
	if containsUnresolved {
		log.Entry().Warningf("mta extension credential handling: Unresolved placeholders found after inserting credentials: %s", placeholders)
	}
	return updated, containsUnresolved, nil
}
// toEnvVarKey maps a credential key to the name of the environment variable
// holding the credential value: every non-alphanumeric character becomes '_',
// camelCase boundaries get an underscore inserted, and the result is
// upper-cased (e.g. "my-credential.id" -> "MY_CREDENTIAL_ID").
func toEnvVarKey(key string) string {
	sanitized := regexp.MustCompile(`[^A-Za-z0-9]`).ReplaceAllString(key, "_")
	snake := regexp.MustCompile(`([a-z0-9])([A-Z])`).ReplaceAllString(sanitized, "${1}_${2}")
	return strings.ToUpper(snake)
}
// toMap converts a list of "key<sep>value" entries into a map. Only the first
// occurrence of the separator splits the entry; everything after it belongs
// to the value (e.g. "A=B=C" with separator "=" yields "A" -> "B=C", matching
// os.Environ semantics). An entry without the separator is an error.
func toMap(keyValue []string, separator string) (map[string]string, error) {
	result := map[string]string{}
	for _, entry := range keyValue {
		// SplitN(…, 2) replaces the previous Split-everything-and-rejoin
		// round trip; the observable behavior is identical.
		kv := strings.SplitN(entry, separator, 2)
		if len(kv) < 2 {
			return map[string]string{}, fmt.Errorf("Cannot convert to map: separator '%s' not found in entry '%s'", separator, entry)
		}
		result[kv[0]] = kv[1]
	}
	return result, nil
}
// handleMtaExtensionDescriptors parses the mtaExtensionDescriptor parameter
// (a whitespace-separated list, possibly containing '-e' flag tokens) and
// returns the cf command line fragment (["-e", "f1,f2"]) together with the
// plain list of extension files.
func handleMtaExtensionDescriptors(mtaExtensionDescriptor string) ([]string, []string) {
	extFiles := []string{}
	for _, token := range strings.Fields(strings.Trim(mtaExtensionDescriptor, " ")) {
		// drop '-e' flag tokens; everything else is treated as a file name
		// REVISIT: maybe check if the extension descriptor exists
		if token != "-e" && token != "" {
			extFiles = append(extFiles, token)
		}
	}
	if len(extFiles) == 0 {
		return []string{}, extFiles
	}
	return []string{"-e", strings.Join(extFiles, ",")}, extFiles
}
// cfDeploy performs the full deployment round trip against Cloud Foundry:
// cf version check, login, listing installed plugins, the actual deploy
// command, an optional post-deploy action and - when a login happened - a
// logout. The CF_TRACE log file is dumped on failure or in verbose mode.
func cfDeploy(
	config *cloudFoundryDeployOptions,
	cfDeployParams []string,
	additionalEnvironment []string,
	postDeployAction func(command command.ExecRunner) error,
	command command.ExecRunner) error {
	const cfLogFile = "cf.log"
	var err error
	var loginPerformed bool
	additionalEnvironment = append(additionalEnvironment, "CF_TRACE="+cfLogFile)
	if len(config.CfHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_HOME="+config.CfHome)
	}
	if len(config.CfPluginHome) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_PLUGIN_HOME="+config.CfPluginHome)
	}
	log.Entry().Infof("Using additional environment variables: %s", additionalEnvironment)
	// TODO set HOME to config.DockerWorkspace
	command.SetEnv(additionalEnvironment)
	err = command.RunExecutable("cf", "version")
	if err == nil {
		err = _cfLogin(command, cloudfoundry.LoginOptions{
			CfAPIEndpoint: config.APIEndpoint,
			CfOrg:         config.Org,
			CfSpace:       config.Space,
			Username:      config.Username,
			Password:      config.Password,
			CfLoginOpts:   strings.Fields(config.LoginParameters),
		})
	}
	if err == nil {
		loginPerformed = true
		// fix: pass the argument directly instead of spreading a one-element
		// slice ([]string{"plugins"}...); behavior is identical
		err = command.RunExecutable("cf", "plugins")
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", []string{"plugins"})
		}
	}
	if err == nil {
		err = command.RunExecutable("cf", cfDeployParams...)
		if err != nil {
			log.Entry().WithError(err).Errorf("Command '%s' failed.", cfDeployParams)
		}
	}
	if err == nil && postDeployAction != nil {
		err = postDeployAction(command)
	}
	if loginPerformed {
		logoutErr := _cfLogout(command)
		if logoutErr != nil {
			log.Entry().WithError(logoutErr).Errorf("Cannot perform cf logout")
			if err == nil {
				err = logoutErr
			}
		}
	}
	if err != nil || GeneralConfig.Verbose {
		e := handleCfCliLog(cfLogFile)
		if e != nil {
			// fix: log the log-handling error itself ('e'); previously the
			// (possibly nil, in verbose mode) deploy error was logged instead
			log.Entry().WithError(e).Errorf("Error reading cf log file '%s'.", cfLogFile)
		}
	}
	return err
}
// findMtar locates exactly one mtar file in the workspace. Finding no mtar,
// or more than one, is an error (the file must then be specified explicitly
// via the mtarPath parameter).
func findMtar() (string, error) {
	const pattern = "**/*.mtar"
	mtars, err := fileUtils.Glob(pattern)
	if err != nil {
		return "", err
	}
	switch len(mtars) {
	case 0:
		return "", fmt.Errorf("No mtar file matching pattern '%s' found", pattern)
	case 1:
		return mtars[0], nil
	default:
		// fix: join the glob result directly; the previous element-wise copy
		// into a second slice served no purpose
		return "", fmt.Errorf("Found multiple mtar files matching pattern '%s' (%s), please specify file via parameter 'mtarPath'", pattern, strings.Join(mtars, ","))
	}
}
// handleCfCliLog dumps the cf CLI trace file (the CF_TRACE target) to the
// step log, framed by START/END markers. A missing trace file only produces
// a warning.
func handleCfCliLog(logFile string) error {
	fExists, err := fileUtils.FileExists(logFile)
	if err != nil {
		return err
	}
	log.Entry().Info("### START OF CF CLI TRACE OUTPUT ###")
	if fExists {
		// NOTE(review): 'f, err :=' and 'line, err :=' below shadow the outer
		// err, so the final 'return err' effectively returns nil on this path
		// (open errors return early, read errors just end the loop). Confirm
		// this best-effort behavior is intended.
		f, err := os.Open(logFile)
		if err != nil {
			return err
		}
		defer f.Close()
		bReader := bufio.NewReader(f)
		for {
			line, err := bReader.ReadString('\n')
			if err == nil || err == io.EOF {
				// maybe inappropriate to log as info. Maybe the line from the
				// log indicates an error, but that is something like a project
				// standard.
				log.Entry().Info(strings.TrimSuffix(line, "\n"))
			}
			if err != nil {
				break
			}
		}
	} else {
		log.Entry().Warningf("No trace file found at '%s'", logFile)
	}
	log.Entry().Info("### END OF CF CLI TRACE OUTPUT ###")
	return err
}
| {
return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
} | conditional_block |
cloudFoundryDeploy.go | package cmd
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/SAP/jenkins-library/pkg/cloudfoundry"
"github.com/SAP/jenkins-library/pkg/command"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/piperutils"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/yaml"
"github.com/elliotchance/orderedmap"
"github.com/pkg/errors"
)
// cfFileUtil abstracts the file-system operations used by this step so they
// can be mocked in tests (see the fileUtils variable below).
type cfFileUtil interface {
	FileExists(string) (bool, error)
	FileRename(string, string) error
	FileRead(string) ([]byte, error)
	FileWrite(path string, content []byte, perm os.FileMode) error
	Getwd() (string, error)
	Glob(string) ([]string, error)
	Chmod(string, os.FileMode) error
	Copy(string, string) (int64, error)
	Stat(path string) (os.FileInfo, error)
}
// Indirections for time, cf session handling, manifest access, variable
// substitution and file handling. Declared as package-level variables so
// unit tests can replace them with mocks.
var _now = time.Now
var _cfLogin = cfLogin
var _cfLogout = cfLogout
var _getManifest = getManifest
var _replaceVariables = yaml.Substitute
var _getVarsOptions = cloudfoundry.GetVarsOptions
var _getVarsFileOptions = cloudfoundry.GetVarsFileOptions
var _environ = os.Environ
var fileUtils cfFileUtil = piperutils.Files{}
// cfLogin performs a cf login with the given options. Kept as a free function
// to simplify mocking via the _cfLogin variable. Maybe we find a more elegant
// way (mock for CFUtils).
func cfLogin(c command.ExecRunner, options cloudfoundry.LoginOptions) error {
	cf := &cloudfoundry.CFUtils{Exec: c}
	return cf.Login(options)
}
// cfLogout performs a cf logout. Kept as a free function to simplify mocking
// via the _cfLogout variable. Maybe we find a more elegant way (mock for
// CFUtils).
func cfLogout(c command.ExecRunner) error {
	cf := &cloudfoundry.CFUtils{Exec: c}
	return cf.Logout()
}
// defaultSmokeTestScript is written to disk when the default smoke-test
// script name 'blueGreenCheckScript.sh' is configured. '$1' is the URL
// argument supplied to the script; STATUS_CODE is expected in the script's
// environment (it is set via additionalEnvironment during deployment).
const defaultSmokeTestScript = `#!/usr/bin/env bash
# this is simply testing if the application root returns HTTP STATUS_CODE
curl -so /dev/null -w '%{response_code}' https://$1 | grep $STATUS_CODE`
// cloudFoundryDeploy is the step entry point. It wires up the command runner
// (with its output rerouted to the logging framework) and delegates to
// runCloudFoundryDeploy; any error terminates the step via Fatalf (os.Exit).
func cloudFoundryDeploy(config cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux) {
	// for command execution use Command
	c := command.Command{}
	// reroute command output to logging framework
	c.Stdout(log.Writer())
	c.Stderr(log.Writer())
	// for http calls import piperhttp "github.com/SAP/jenkins-library/pkg/http"
	// and use a &piperhttp.Client{} in a custom system
	// Example: step checkmarxExecuteScan.go
	// error situations should stop execution through log.Entry().Fatal() call which leads to an os.Exit(1) in the end
	err := runCloudFoundryDeploy(&config, telemetryData, influxData, &c)
	if err != nil {
		log.Entry().WithError(err).Fatalf("step execution failed: %s", err)
	}
}
// runCloudFoundryDeploy validates the configuration and dispatches to the
// deployment handler matching config.DeployTool ('mtaDeployPlugin' or
// 'cf_native'). An unknown deploy tool is logged and skipped. When a
// deployment was actually triggered, its outcome is recorded in influxData.
func runCloudFoundryDeploy(config *cloudFoundryDeployOptions, telemetryData *telemetry.CustomData, influxData *cloudFoundryDeployInflux, command command.ExecRunner) error {
	log.Entry().Infof("General parameters: deployTool='%s', deployType='%s', cfApiEndpoint='%s', cfOrg='%s', cfSpace='%s'",
		config.DeployTool, config.DeployType, config.APIEndpoint, config.Org, config.Space)

	if err := validateAppName(config.AppName); err != nil {
		return err
	}

	validateDeployTool(config)

	deployTriggered := true
	var err error
	switch config.DeployTool {
	case "mtaDeployPlugin":
		err = handleMTADeployment(config, command)
	case "cf_native":
		err = handleCFNativeDeployment(config, command)
	default:
		deployTriggered = false
		log.Entry().Warningf("Found unsupported deployTool ('%s'). Skipping deployment. Supported deploy tools: 'mtaDeployPlugin', 'cf_native'", config.DeployTool)
	}

	if deployTriggered {
		prepareInflux(err == nil, config, influxData)
	}
	return err
}
// validateDeployTool derives a default for the deployTool parameter from the
// buildTool parameter when deployTool is unset. buildTool 'mta' maps to the
// MTA deploy plugin; every other non-empty buildTool maps to 'cf_native'.
// An explicitly configured deployTool, or an empty buildTool, is left alone.
func validateDeployTool(config *cloudFoundryDeployOptions) {
	if len(config.DeployTool) > 0 || len(config.BuildTool) == 0 {
		return
	}
	derived := "cf_native"
	if config.BuildTool == "mta" {
		derived = "mtaDeployPlugin"
	}
	config.DeployTool = derived
	log.Entry().Infof("Parameter deployTool not specified - deriving from buildTool '%s': '%s'",
		config.BuildTool, config.DeployTool)
}
// validateAppName checks the given application name against CloudFoundry's
// naming rules (letters, digits and dashes; must not start or end with a
// dash). The empty string is accepted (no app name configured). Underscores
// and leading/trailing dashes yield an error; other non-alphanumeric
// characters only cause a warning for backward compatibility.
func validateAppName(appName string) error {
	// for the sake of brevity we consider the empty string as valid app name here
	isValidAppName, err := regexp.MatchString("^$|^[a-zA-Z0-9]$|^[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]$", appName)
	if err != nil {
		return err
	}
	if isValidAppName {
		return nil
	}
	const (
		underscore = "_"
		dash       = "-"
		docuLink   = "https://docs.cloudfoundry.org/devguide/deploy-apps/deploy-app.html#basic-settings"
	)
	log.Entry().Warningf("Your application name '%s' contains non-alphanumeric characters which may lead to errors in the future, "+
		"as they are not supported by CloudFoundry. For more details please visit %s", appName, docuLink)
	var fail bool
	message := []string{fmt.Sprintf("Your application name '%s'", appName)}
	if strings.Contains(appName, underscore) {
		message = append(message, fmt.Sprintf("contains a '%s' (underscore) which is not allowed, only letters, dashes and numbers can be used.", underscore))
		fail = true
	}
	if strings.HasPrefix(appName, dash) || strings.HasSuffix(appName, dash) {
		message = append(message, fmt.Sprintf("starts or ends with a '%s' (dash) which is not allowed, only letters and numbers can be used.", dash))
		fail = true
	}
	message = append(message, fmt.Sprintf("Please change the name to fit this requirement(s). For more details please visit %s.", docuLink))
	if fail {
		// fix: errors.New instead of fmt.Errorf(nonConstant) - the joined
		// message contains the user-provided app name, which must not be
		// interpreted as a format string (a '%' in the name would corrupt
		// the error text, and go vet flags this pattern).
		return errors.New(strings.Join(message, " "))
	}
	return nil
}
// prepareInflux records the outcome of a deployment in the influx data set.
// A nil influxData (influx reporting not configured) is silently ignored.
func prepareInflux(success bool, config *cloudFoundryDeployOptions, influxData *cloudFoundryDeployInflux) {
	if influxData == nil {
		return
	}
	var result string
	if success {
		result = "SUCCESS"
	} else {
		result = "FAILURE"
	}
	tags := &influxData.deployment_data.tags
	tags.artifactVersion = config.ArtifactVersion
	tags.deployUser = config.Username
	tags.deployResult = result
	tags.cfAPIEndpoint = config.APIEndpoint
	tags.cfOrg = config.Org
	tags.cfSpace = config.Space
	fields := &influxData.deployment_data.fields
	// n/a (literally) is also reported in groovy
	fields.artifactURL = "n/a"
	fields.commitHash = config.CommitHash
	fields.deployTime = strings.ToUpper(_now().Format("Jan 02 2006 15:04:05"))
	// we should discuss how we handle the job trigger
	// 1.) outside Jenkins
	// 2.) inside Jenkins (how to get)
	fields.jobTrigger = "n/a"
}
// handleMTADeployment resolves the mtar file to deploy - either from the
// mtaPath configuration or, when unset, by globbing the workspace - and
// triggers the MTA deployment with it.
func handleMTADeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	mtarFilePath := config.MtaPath
	if len(mtarFilePath) > 0 {
		exists, err := fileUtils.FileExists(mtarFilePath)
		if err != nil {
			return errors.Wrapf(err, "Cannot check if file path '%s' exists", mtarFilePath)
		}
		if !exists {
			return fmt.Errorf("mtar file '%s' retrieved from configuration does not exist", mtarFilePath)
		}
		log.Entry().Debugf("Using mtar file '%s' from configuration", mtarFilePath)
	} else {
		found, err := findMtar()
		if err != nil {
			return err
		}
		mtarFilePath = found
		log.Entry().Debugf("Using mtar file '%s' found in workspace", mtarFilePath)
	}
	return deployMta(config, mtarFilePath, command)
}
// deployConfig bundles the parameters for a single cf-native deployment call.
type deployConfig struct {
	DeployCommand   string   // cf sub-command, e.g. "push" or "blue-green-deploy"
	DeployOptions   []string // additional options appended after the app name
	AppName         string   // application name; may be empty
	ManifestFile    string   // manifest passed via '-f' when non-empty
	SmokeTestScript []string // smoke-test fragment, e.g. ["--smoke-test", "<path>"]
}
// handleCFNativeDeployment prepares and runs a cf-native deployment
// ('standard' cf push or 'blue-green'). The deploy command, its options and
// an optional smoke-test fragment come from the type-specific prepare
// functions; afterwards the assembled deploy configuration is executed.
func handleCFNativeDeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
	deployType, err := checkAndUpdateDeployTypeForNotSupportedManifest(config)
	if err != nil {
		return err
	}
	var deployCommand string
	var smokeTestScript []string
	var deployOptions []string
	// deploy command will be provided by the prepare functions below
	if deployType == "blue-green" {
		deployCommand, deployOptions, smokeTestScript, err = prepareBlueGreenCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf native deployment. DeployType '%s'", deployType)
		}
	} else if deployType == "standard" {
		deployCommand, deployOptions, smokeTestScript, err = prepareCfPushCfNativeDeploy(config)
		if err != nil {
			return errors.Wrapf(err, "Cannot prepare cf push native deployment. DeployType '%s'", deployType)
		}
	} else {
		return fmt.Errorf("Invalid deploy type received: '%s'. Supported values: %v", deployType, []string{"blue-green", "standard"})
	}
	appName, err := getAppName(config)
	if err != nil {
		return err
	}
	manifestFile, err := getManifestFileName(config)
	// fix: this error was silently dropped before; check it like every other call
	if err != nil {
		return err
	}
	log.Entry().Infof("CF native deployment ('%s') with:", config.DeployType)
	log.Entry().Infof("cfAppName='%s'", appName)
	log.Entry().Infof("cfManifest='%s'", manifestFile)
	log.Entry().Infof("cfManifestVariables: '%v'", config.ManifestVariables)
	log.Entry().Infof("cfManifestVariablesFiles: '%v'", config.ManifestVariablesFiles)
	log.Entry().Infof("cfdeployDockerImage: '%s'", config.DeployDockerImage)
	log.Entry().Infof("smokeTestScript: '%s'", config.SmokeTestScript)
	additionalEnvironment := []string{
		"STATUS_CODE=" + strconv.FormatInt(int64(config.SmokeTestStatusCode), 10),
	}
	if len(config.DockerPassword) > 0 {
		additionalEnvironment = append(additionalEnvironment, "CF_DOCKER_PASSWORD="+config.DockerPassword)
	}
	// NOTE(review): AppName/ManifestFile are taken from the raw config here,
	// not from the derived appName/manifestFile above (which are only logged)
	// - confirm this asymmetry is intended before changing it.
	myDeployConfig := deployConfig{
		DeployCommand:   deployCommand,
		DeployOptions:   deployOptions,
		AppName:         config.AppName,
		ManifestFile:    config.Manifest,
		SmokeTestScript: smokeTestScript,
	}
	log.Entry().Infof("DeployConfig: %v", myDeployConfig)
	return deployCfNative(myDeployConfig, config, additionalEnvironment, command)
}
func deployCfNative(deployConfig deployConfig, config *cloudFoundryDeployOptions, additionalEnvironment []string, cmd command.ExecRunner) error {
deployStatement := []string{
deployConfig.DeployCommand,
}
if len(deployConfig.AppName) > 0 {
deployStatement = append(deployStatement, deployConfig.AppName)
}
if len(deployConfig.DeployOptions) > 0 {
deployStatement = append(deployStatement, deployConfig.DeployOptions...)
}
if len(deployConfig.ManifestFile) > 0 {
deployStatement = append(deployStatement, "-f")
deployStatement = append(deployStatement, deployConfig.ManifestFile)
}
if len(config.DeployDockerImage) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-image", config.DeployDockerImage)
}
if len(config.DockerUsername) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-username", config.DockerUsername)
}
if len(deployConfig.SmokeTestScript) > 0 {
deployStatement = append(deployStatement, deployConfig.SmokeTestScript...)
}
if len(config.CfNativeDeployParameters) > 0 {
deployStatement = append(deployStatement, strings.Fields(config.CfNativeDeployParameters)...)
}
stopOldAppIfRunning := func(_cmd command.ExecRunner) error {
if config.KeepOldInstance && config.DeployType == "blue-green" {
oldAppName := deployConfig.AppName + "-old"
var buff bytes.Buffer
_cmd.Stdout(&buff)
defer func() {
_cmd.Stdout(log.Writer())
}()
err := _cmd.RunExecutable("cf", "stop", oldAppName)
if err != nil {
cfStopLog := buff.String()
if !strings.Contains(cfStopLog, oldAppName+" not found") {
return fmt.Errorf("Could not stop application '%s'. Error: %s", oldAppName, cfStopLog)
}
log.Entry().Infof("Cannot stop application '%s' since this appliation was not found.", oldAppName)
} else {
log.Entry().Infof("Old application '%s' has been stopped.", oldAppName)
}
}
return nil
}
return cfDeploy(config, deployStatement, additionalEnvironment, stopOldAppIfRunning, cmd)
}
func getManifest(name string) (cloudfoundry.Manifest, error) {
return cloudfoundry.ReadManifest(name)
}
func getManifestFileName(config *cloudFoundryDeployOptions) (string, error) {
manifestFileName := config.Manifest
if len(manifestFileName) == 0 {
manifestFileName = "manifest.yml"
}
return manifestFileName, nil
}
func getAppName(config *cloudFoundryDeployOptions) (string, error) {
if len(config.AppName) > 0 {
return config.AppName, nil
}
if config.DeployType == "blue-green" {
return "", fmt.Errorf("Blue-green plugin requires app name to be passed (see https://github.com/bluemixgaragelondon/cf-blue-green-deploy/issues/27)")
}
manifestFile, err := getManifestFileName(config)
fileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
}
if !fileExists {
return "", fmt.Errorf("Manifest file '%s' not found. Cannot retrieve app name", manifestFile)
}
manifest, err := _getManifest(manifestFile)
if err != nil {
return "", err
}
apps, err := manifest.GetApplications()
if err != nil {
return "", err
}
if len(apps) == 0 {
return "", fmt.Errorf("No apps declared in manifest '%s'", manifestFile)
}
namePropertyExists, err := manifest.ApplicationHasProperty(0, "name")
if err != nil {
return "", err
}
if !namePropertyExists {
return "", fmt.Errorf("No appName available in manifest '%s'", manifestFile)
}
appName, err := manifest.GetApplicationProperty(0, "name")
if err != nil {
return "", err
}
var name string
var ok bool
if name, ok = appName.(string); !ok {
return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
}
if len(name) == 0 {
return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
}
return name, nil
}
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
if smokeTestScript == "blueGreenCheckScript.sh" {
// what should we do if there is already a script with the given name? Should we really overwrite ...
err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to write default smoke-test script: %w", err)
}
log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
}
if len(smokeTestScript) > 0 {
err := fileUtils.Chmod(smokeTestScript, 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
}
pwd, err := fileUtils.Getwd()
if err != nil {
return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
}
return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
return []string{}, nil
}
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
if err != nil {
return "", []string{}, []string{}, err
}
var deployOptions = []string{}
if !config.KeepOldInstance {
deployOptions = append(deployOptions, "--delete-old-apps")
}
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
}
if !manifestFileExists {
log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
} else {
manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
}
manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
}
modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
}
if modified {
log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
} else {
log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
}
err = handleLegacyCfManifest(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
}
}
return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles: in case the only provided file is 'manifest-variables.yml' and this file does not
// exist we ignore that file. For any other file there is no check if that file exists. In case several files are
// provided we also do not check for the default file 'manifest-variables.yml'
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
const defaultManifestVariableFileName = "manifest-variables.yml"
if len(manifestVariablesFiles) == 1 && manifestVariablesFiles[0] == defaultManifestVariableFileName {
// we have only the default file. Most likely this is not configured, but we simply have the default.
// In case this file does not exist we ignore that file.
exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
if err != nil {
return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
}
if !exists {
return []string{}, nil
}
}
return manifestVariablesFiles, nil
}
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
parameterMap := orderedmap.NewOrderedMap()
for _, p := range parameters {
keyVal := strings.Split(p, "=")
if len(keyVal) != 2 {
return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", p)
}
parameterMap.Set(keyVal[0], keyVal[1])
}
return parameterMap, nil
}
func handleLegacyCfManifest(manifestFile string) error {
manifest, err := _getManifest(manifestFile)
if err != nil {
return err
}
err = manifest.Transform()
if err != nil {
return err
}
if manifest.IsModified() {
err = manifest.WriteManifest()
if err != nil {
return err
}
log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
} else {
log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
}
return nil
}
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
deployOptions := []string{}
varOptions, err := _getVarsOptions(config.ManifestVariables)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
}
varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
if err != nil {
if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
for _, missingVarFile := range e.MissingFiles {
log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
}
} else {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
}
}
deployOptions = append(deployOptions, varOptions...)
deployOptions = append(deployOptions, varFileOptions...)
return "push", deployOptions, []string{}, nil
}
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
out := map[string]interface{}{}
if err == nil {
for _, key := range in.Keys() {
if k, ok := key.(string); ok {
val, exists := in.Get(key)
if exists {
out[k] = val
} else {
return nil, fmt.Errorf("No entry found for '%v'", key)
}
} else {
return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
}
}
}
return out, err
}
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", err
}
if config.DeployType == "blue-green" && manifestFileExists {
manifest, _ := _getManifest(manifestFile)
apps, err := manifest.GetApplications()
if err != nil {
return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
}
if len(apps) > 1 {
return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
}
hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
if err != nil {
return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
}
if len(apps) == 1 && hasNoRouteProperty {
const deployTypeStandard = "standard"
log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
return deployTypeStandard, nil
}
}
return config.DeployType, nil
}
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
deployCommand := "deploy"
deployParams := []string{}
| }
if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
deployCommand = "bg-deploy"
const noConfirmFlag = "--no-confirm"
if !piperutils.ContainsString(deployParams, noConfirmFlag) {
deployParams = append(deployParams, noConfirmFlag)
}
}
cfDeployParams := []string{
deployCommand,
mtarFilePath,
}
if len(deployParams) > 0 {
cfDeployParams = append(cfDeployParams, deployParams...)
}
extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)
for _, extFile := range extFiles {
_, err := fileUtils.Copy(extFile, extFile+".original")
if err != nil {
return fmt.Errorf("Cannot prepare mta extension files: %w", err)
}
_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
if err != nil {
return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
}
}
cfDeployParams = append(cfDeployParams, extFileParams...)
err := cfDeploy(config, cfDeployParams, nil, nil, command)
for _, extFile := range extFiles {
renameError := fileUtils.FileRename(extFile+".original", extFile)
if err == nil && renameError != nil {
return renameError
}
}
return err
}
func handleMtaExtensionCredentials(extFile string, credentials map[string]interface{}) (updated, containsUnresolved bool, err error) {
log.Entry().Debugf("Inserting credentials into extension file '%s'", extFile)
b, err := fileUtils.FileRead(extFile)
if err != nil {
return false, false, errors.Wrapf(err, "Cannot handle credentials for mta extension file '%s'", extFile)
}
content := string(b)
env, err := toMap(_environ(), "=")
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
missingCredentials := []string{}
for name, credentialKey := range credentials {
credKey, ok := credentialKey.(string)
if !ok {
return false, false, fmt.Errorf("cannot handle mta extension credentials: Cannot cast '%v' (type %T) to string", credentialKey, credentialKey)
}
const allowedVariableNamePattern = "^[-_A-Za-z0-9]+$"
alphaNumOnly := regexp.MustCompile(allowedVariableNamePattern)
if !alphaNumOnly.MatchString(name) {
return false, false, fmt.Errorf("credential key name '%s' contains unsupported character. Must contain only %s", name, allowedVariableNamePattern)
}
pattern := regexp.MustCompile("<%=\\s*" + name + "\\s*%>")
if pattern.MatchString(content) {
cred := env[toEnvVarKey(credKey)]
if len(cred) == 0 {
missingCredentials = append(missingCredentials, credKey)
continue
}
content = pattern.ReplaceAllLiteralString(content, cred)
updated = true
log.Entry().Debugf("Mta extension credentials handling: Placeholder '%s' has been replaced by credential denoted by '%s'/'%s' in file '%s'", name, credKey, toEnvVarKey(credKey), extFile)
} else {
log.Entry().Debugf("Mta extension credentials handling: Variable '%s' is not used in file '%s'", name, extFile)
}
}
if len(missingCredentials) > 0 {
missinCredsEnvVarKeyCompatible := []string{}
for _, missingKey := range missingCredentials {
missinCredsEnvVarKeyCompatible = append(missinCredsEnvVarKeyCompatible, toEnvVarKey(missingKey))
}
// ensure stable order of the entries. Needed e.g. for the tests.
sort.Strings(missingCredentials)
sort.Strings(missinCredsEnvVarKeyCompatible)
return false, false, fmt.Errorf("cannot handle mta extension credentials: No credentials found for '%s'/'%s'. Are these credentials maintained?", missingCredentials, missinCredsEnvVarKeyCompatible)
}
if !updated {
log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has not been updated. Seems to contain no credentials.", extFile)
} else {
fInfo, err := fileUtils.Stat(extFile)
fMode := fInfo.Mode()
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
err = fileUtils.FileWrite(extFile, []byte(content), fMode)
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has been updated.", extFile)
}
re := regexp.MustCompile(`<%=.+%>`)
placeholders := re.FindAll([]byte(content), -1)
containsUnresolved = (len(placeholders) > 0)
if containsUnresolved {
log.Entry().Warningf("mta extension credential handling: Unresolved placeholders found after inserting credentials: %s", placeholders)
}
return updated, containsUnresolved, nil
}
func toEnvVarKey(key string) string {
key = regexp.MustCompile(`[^A-Za-z0-9]`).ReplaceAllString(key, "_")
return strings.ToUpper(regexp.MustCompile(`([a-z0-9])([A-Z])`).ReplaceAllString(key, "${1}_${2}"))
}
func toMap(keyValue []string, separator string) (map[string]string, error) {
result := map[string]string{}
for _, entry := range keyValue {
kv := strings.Split(entry, separator)
if len(kv) < 2 {
return map[string]string{}, fmt.Errorf("Cannot convert to map: separator '%s' not found in entry '%s'", separator, entry)
}
result[kv[0]] = strings.Join(kv[1:], separator)
}
return result, nil
}
func handleMtaExtensionDescriptors(mtaExtensionDescriptor string) ([]string, []string) {
var result = []string{}
var extFiles = []string{}
for _, part := range strings.Fields(strings.Trim(mtaExtensionDescriptor, " ")) {
if part == "-e" || part == "" {
continue
}
// REVISIT: maybe check if the extension descriptor exists
extFiles = append(extFiles, part)
}
if len(extFiles) > 0 {
result = append(result, "-e")
result = append(result, strings.Join(extFiles, ","))
}
return result, extFiles
}
func cfDeploy(
config *cloudFoundryDeployOptions,
cfDeployParams []string,
additionalEnvironment []string,
postDeployAction func(command command.ExecRunner) error,
command command.ExecRunner) error {
const cfLogFile = "cf.log"
var err error
var loginPerformed bool
additionalEnvironment = append(additionalEnvironment, "CF_TRACE="+cfLogFile)
if len(config.CfHome) > 0 {
additionalEnvironment = append(additionalEnvironment, "CF_HOME="+config.CfHome)
}
if len(config.CfPluginHome) > 0 {
additionalEnvironment = append(additionalEnvironment, "CF_PLUGIN_HOME="+config.CfPluginHome)
}
log.Entry().Infof("Using additional environment variables: %s", additionalEnvironment)
// TODO set HOME to config.DockerWorkspace
command.SetEnv(additionalEnvironment)
err = command.RunExecutable("cf", "version")
if err == nil {
err = _cfLogin(command, cloudfoundry.LoginOptions{
CfAPIEndpoint: config.APIEndpoint,
CfOrg: config.Org,
CfSpace: config.Space,
Username: config.Username,
Password: config.Password,
CfLoginOpts: strings.Fields(config.LoginParameters),
})
}
if err == nil {
loginPerformed = true
err = command.RunExecutable("cf", []string{"plugins"}...)
if err != nil {
log.Entry().WithError(err).Errorf("Command '%s' failed.", []string{"plugins"})
}
}
if err == nil {
err = command.RunExecutable("cf", cfDeployParams...)
if err != nil {
log.Entry().WithError(err).Errorf("Command '%s' failed.", cfDeployParams)
}
}
if err == nil && postDeployAction != nil {
err = postDeployAction(command)
}
if loginPerformed {
logoutErr := _cfLogout(command)
if logoutErr != nil {
log.Entry().WithError(logoutErr).Errorf("Cannot perform cf logout")
if err == nil {
err = logoutErr
}
}
}
if err != nil || GeneralConfig.Verbose {
e := handleCfCliLog(cfLogFile)
if e != nil {
log.Entry().WithError(err).Errorf("Error reading cf log file '%s'.", cfLogFile)
}
}
return err
}
func findMtar() (string, error) {
const pattern = "**/*.mtar"
mtars, err := fileUtils.Glob(pattern)
if err != nil {
return "", err
}
if len(mtars) == 0 {
return "", fmt.Errorf("No mtar file matching pattern '%s' found", pattern)
}
if len(mtars) > 1 {
sMtars := []string{}
sMtars = append(sMtars, mtars...)
return "", fmt.Errorf("Found multiple mtar files matching pattern '%s' (%s), please specify file via parameter 'mtarPath'", pattern, strings.Join(sMtars, ","))
}
return mtars[0], nil
}
func handleCfCliLog(logFile string) error {
fExists, err := fileUtils.FileExists(logFile)
if err != nil {
return err
}
log.Entry().Info("### START OF CF CLI TRACE OUTPUT ###")
if fExists {
f, err := os.Open(logFile)
if err != nil {
return err
}
defer f.Close()
bReader := bufio.NewReader(f)
for {
line, err := bReader.ReadString('\n')
if err == nil || err == io.EOF {
// maybe inappropriate to log as info. Maybe the line from the
// log indicates an error, but that is something like a project
// standard.
log.Entry().Info(strings.TrimSuffix(line, "\n"))
}
if err != nil {
break
}
}
} else {
log.Entry().Warningf("No trace file found at '%s'", logFile)
}
log.Entry().Info("### END OF CF CLI TRACE OUTPUT ###")
return err
} | if len(config.MtaDeployParameters) > 0 {
deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...) | random_line_split |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, ..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
}, | },
WindowEvent::MouseInput { button, state: Released, ..} => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
} | _ => ()
} | random_line_split |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn | (source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, ..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, ..} => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| load | identifier_name |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else | ;
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, ..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, ..} => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
}
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
} | conditional_block |
viewer.rs | use std::f32::consts::PI;
use std::os::raw::c_void;
use std::path::Path;
use std::process;
use std::time::Instant;
use cgmath::{ Deg, Point3 };
use collision::Aabb;
use gl;
use gltf;
use glutin;
use glutin::{
Api,
MouseScrollDelta,
MouseButton,
GlContext,
GlRequest,
GlProfile,
VirtualKeyCode,
WindowEvent,
};
use glutin::dpi::PhysicalSize;
use glutin::ElementState::*;
use image::{DynamicImage};
use controls::{OrbitControls, NavState};
use controls::CameraMovement::*;
use framebuffer::Framebuffer;
use importdata::ImportData;
use render::*;
use render::math::*;
use utils::{print_elapsed, FrameTimer, gl_check_error, print_context_info};
// TODO!: complete and pass through draw calls? or get rid of multiple shaders?
// How about state ordering anyway?
// struct DrawState {
// current_shader: ShaderFlags,
// back_face_culling_enabled: bool
// }
pub struct CameraOptions {
pub index: i32,
pub position: Option<Vector3>,
pub target: Option<Vector3>,
pub fovy: Deg<f32>,
pub straight: bool,
}
pub struct GltfViewer {
size: PhysicalSize,
dpi_factor: f64,
orbit_controls: OrbitControls,
first_mouse: bool,
last_x: f32,
last_y: f32,
events_loop: Option<glutin::EventsLoop>,
gl_window: Option<glutin::GlWindow>,
// TODO!: get rid of scene?
root: Root,
scene: Scene,
delta_time: f64, // seconds
last_frame: Instant,
render_timer: FrameTimer,
}
/// Note about `headless` and `visible`: True headless rendering doesn't work on
/// all operating systems, but an invisible window usually works
impl GltfViewer {
pub fn new(
source: &str,
width: u32,
height: u32,
headless: bool,
visible: bool,
camera_options: CameraOptions,
scene_index: usize,
) -> GltfViewer {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
|
fn process_input(input: glutin::KeyboardInput, controls: &mut OrbitControls) -> bool {
let pressed = match input.state {
Pressed => true,
Released => false
};
if let Some(code) = input.virtual_keycode {
match code {
VirtualKeyCode::Escape if pressed => return false,
VirtualKeyCode::W | VirtualKeyCode::Up => controls.process_keyboard(FORWARD, pressed),
VirtualKeyCode::S | VirtualKeyCode::Down => controls.process_keyboard(BACKWARD, pressed),
VirtualKeyCode::A | VirtualKeyCode::Left => controls.process_keyboard(LEFT, pressed),
VirtualKeyCode::D | VirtualKeyCode::Right => controls.process_keyboard(RIGHT, pressed),
_ => ()
}
}
true
}
| {
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, ..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
},
_ => ()
}
},
WindowEvent::MouseInput { button, state: Released, ..} => {
match (button, orbit_controls.state.clone()) {
(MouseButton::Left, NavState::Rotating) | (MouseButton::Right, NavState::Panning) => {
orbit_controls.state = NavState::None;
orbit_controls.handle_mouse_up();
},
_ => ()
}
}
WindowEvent::CursorMoved { position, .. } => {
let ph = position.to_physical(*dpi_factor);
orbit_controls.handle_mouse_move(ph)
},
WindowEvent::MouseWheel { delta: MouseScrollDelta::PixelDelta(logical), .. } => {
let ph = logical.to_physical(*dpi_factor);
orbit_controls.process_mouse_scroll(ph.y as f32);
}
WindowEvent::MouseWheel { delta: MouseScrollDelta::LineDelta(_rows, lines), .. } => {
orbit_controls.process_mouse_scroll(lines * 3.0);
}
WindowEvent::KeyboardInput { input, .. } => {
keep_running = process_input(input, &mut orbit_controls);
}
_ => ()
},
_ => ()
}
});
keep_running
} | identifier_body |
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver; | use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state will repeat until a free submission queue entry appears to
// it, which it probably will since queues aren't supposed to be nearly always be full.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submisson queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: CQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
} | random_line_split | |
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver;
use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn | (
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state will repeat until a free submission queue entry appears to
// it, which it probably will since queues aren't supposed to be nearly always be full.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submisson queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: CQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
}
| start_cq_reactor_thread | identifier_name |
cq_reactor.rs | //! The Completion Queue Reactor. Functions like any other async/await reactor, but is driven by
//! IRQs triggering wakeups in order to poll NVME completion queues (see `CompletionFuture`).
//!
//! While the reactor is primarily intended to wait for IRQs and then poll completion queues, it
//! can also be used for notifying when a full submission queue can submit a new command (see
//! `AvailableSqEntryFuture`).
use std::convert::TryFrom;
use std::fs::File;
use std::future::Future;
use std::io::prelude::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::{mem, task, thread};
use syscall::data::Event;
use syscall::Result;
use crossbeam_channel::Receiver;
use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> |
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState<'a, F> {
// the future is in its initial state: the command has not been submitted yet, and no interest
// has been registered. this state will repeat until a free submission queue entry appears to
// it, which it probably will since queues aren't supposed to be nearly always be full.
PendingSubmission {
cmd_init: F,
nvme: &'a Nvme,
sq_id: SqId,
},
PendingCompletion {
nvme: &'a Nvme,
cq_id: CqId,
cmd_id: CmdId,
sq_id: SqId,
message: Arc<Mutex<Option<CompletionMessage>>>,
},
Finished,
Placeholder,
}
pub struct CompletionFuture<'a, F> {
pub state: CompletionFutureState<'a, F>,
}
// enum not self-referential
impl<F> Unpin for CompletionFuture<'_, F> {}
impl<F> Future for CompletionFuture<'_, F>
where
F: FnOnce(CmdId) -> NvmeCmd,
{
type Output = NvmeComp;
fn poll(self: Pin<&mut Self>, context: &mut task::Context) -> task::Poll<Self::Output> {
let this = &mut self.get_mut().state;
match mem::replace(this, CompletionFutureState::Placeholder) {
CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id } => {
let sqs_read_guard = nvme.submission_queues.read().unwrap();
let &(ref sq_lock, cq_id) = sqs_read_guard
.get(&sq_id)
.expect("nvmed: internal error: given SQ for SQ ID not there");
let mut sq_guard = sq_lock.lock().unwrap();
let sq = &mut *sq_guard;
if sq.is_full() {
// when the CQ reactor gets a new completion queue entry, it'll lock the
// submisson queue it came from. since we're holding the same lock, this
// message will always be sent before the reactor is done with the entry.
nvme.reactor_sender.send(NotifReq::RequestAvailSubmission { sq_id, waker: context.waker().clone() }).unwrap();
*this = CompletionFutureState::PendingSubmission { cmd_init, nvme, sq_id };
return task::Poll::Pending;
}
let cmd_id =
u16::try_from(sq.tail).expect("nvmed: internal error: CQ has more than 2^16 entries");
let tail = sq.submit_unchecked(cmd_init(cmd_id));
let tail = u16::try_from(tail).unwrap();
// make sure that we register interest before the reactor can get notified
let message = Arc::new(Mutex::new(None));
*this = CompletionFutureState::PendingCompletion { nvme, cq_id, cmd_id, sq_id, message: Arc::clone(&message), };
nvme.reactor_sender.send(NotifReq::RequestCompletion { cq_id, sq_id, cmd_id, message, waker: context.waker().clone() }).expect("reactor dead");
unsafe { nvme.submission_queue_tail(sq_id, tail) };
task::Poll::Pending
}
CompletionFutureState::PendingCompletion {
message,
cq_id,
cmd_id,
sq_id,
nvme,
} => {
if let Some(value) = message.lock().unwrap().take() {
*this = CompletionFutureState::Finished;
return task::Poll::Ready(value.cq_entry);
}
nvme.reactor_sender.send(NotifReq::RequestCompletion {
cq_id,
sq_id,
cmd_id,
waker: context.waker().clone(),
message: Arc::clone(&message),
}).expect("reactor dead");
*this = CompletionFutureState::PendingCompletion { message, cq_id, cmd_id, sq_id, nvme };
task::Poll::Pending
}
CompletionFutureState::Finished => {
panic!("calling poll() on an already finished CompletionFuture")
}
CompletionFutureState::Placeholder => unreachable!(),
}
}
}
| {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
} | identifier_body |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() |
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
}
| {
for v in d.visuals() {
return (v.visual_id(), depth);
}
} | conditional_block |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() {
for v in d.visuals() {
return (v.visual_id(), depth);
}
}
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
| impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
} | self.pos = (x as i16, y as i16);
}
}
| random_line_split |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
for d in scr.allowed_depths() {
if depth == d.depth() {
for v in d.visuals() {
return (v.visual_id(), depth);
}
}
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn | (&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
}
}
| get_atom | identifier_name |
xcbwin.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::ops::Drop;
use std::thread;
use window::Dock;
use cairo;
use cairo::XCBSurface;
use cairo_sys;
use xcb;
use xcb::*;
/// Pick a visual id advertised at the requested depth, falling back to
/// the screen's root visual when no depth entry provides one.
fn get_visualid_from_depth(scr: Screen, depth: u8) -> (Visualid, u8) {
    for d in scr.allowed_depths() {
        if d.depth() != depth {
            continue;
        }
        // The first visual listed at this depth wins.
        if let Some(v) = d.visuals().next() {
            return (v.visual_id(), depth);
        }
    }
    // If no depth matches return root visual
    (scr.root_visual(), scr.root_depth())
}
/// State for one XCB-backed bar window, including the off-screen buffer
/// it draws into and the callback shared with the event thread.
pub struct XCB {
    conn: Arc<Connection>,   // shared with the event-monitoring thread
    scr_num: i32,            // screen index on this connection
    win: Window,             // the bar window itself
    root: Window,            // root window of the chosen screen
    bufpix: Pixmap,          // back buffer; copied to `win` on flush/expose
    gc: Gcontext,            // graphics context used for copy_area
    colour: Colormap,        // colormap created for the chosen visual
    visual: Visualid,        // visual picked at startup (32-bit preferred)
    depth: u8,               // depth matching `visual`
    size: (u16, u16), // (w, h)
    pos: (i16, i16), // (x, y)
    scr_size: (u16, u16),    // full screen dimensions in pixels
    bottom: bool,            // true when the bar is docked to the bottom edge
    // Click callback invoked by the event thread with (x, y, button).
    click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
/// Connect to the X server, create the (still unmapped) bar window with
/// its colormap, GC and back pixmap, and spawn the event-monitoring
/// thread that handles clicks and expose repaints.
pub fn new() -> XCB {
    // Create XCB struct to return
    let (conn, scr_num) = {
        let (conn, scr_num) = Connection::connect(None).unwrap();
        (Arc::new(conn), scr_num)
    };
    // Reserve ids up front; the server-side objects are created below.
    let win = conn.generate_id();
    let gc = conn.generate_id(); // The GC is created later
    let colour = conn.generate_id();
    let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
        Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
        )));
    let bufpix = conn.generate_id(); // Pixmap created later
    let size = (1u16, 1u16); // default size
    let root;
    let visual;
    let depth;
    let mut scr_size = (0u16, 0u16);
    {
        let screen = conn.get_setup()
            .roots()
            .nth(scr_num as usize)
            .unwrap();
        scr_size.0 = screen.width_in_pixels();
        scr_size.1 = screen.height_in_pixels();
        root = screen.root();
        // Prefer a 32-bit visual so the bar can use an alpha channel;
        // falls back to the root visual if none is available.
        let (v, d) = get_visualid_from_depth(screen, 32);
        visual = v;
        depth = d;
    }
    let x = XCB {
        conn,
        scr_num,
        win,
        root,
        bufpix,
        gc,
        colour,
        visual,
        depth,
        size,
        pos: (0, 0),
        scr_size,
        bottom: false,
        click_fn,
    };
    // Create the window
    // Masks to use
    create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
                    x.colour, x.root,
                    x.visual)
        .request_check().unwrap();
    let values = [
        (CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
        (CW_BACK_PIXEL, 0),
        (CW_COLORMAP, x.colour),
        (CW_BORDER_PIXEL, 0),
    ];
    create_window(&*x.conn,
                  x.depth,
                  x.win,
                  x.root,
                  x.pos.0, x.pos.1,
                  x.size.0, x.size.1,
                  0,
                  WINDOW_CLASS_INPUT_OUTPUT as u16,
                  x.visual,
                  &values)
        .request_check().unwrap();
    let title = "lemonade";
    change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
                    xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
    create_gc(&*x.conn, x.gc, x.win, &[]);
    create_pixmap(&*x.conn, x.depth, x.bufpix,
                  x.win, x.size.0, x.size.1);
    // Create event-monitoring thread
    let conn = x.conn.clone();
    let click_fn = x.click_fn.clone();
    let win = x.win;
    let bufpix = x.bufpix;
    let gc = x.gc;
    thread::spawn(move || {
        // Runs until the connection drops: forwards button presses to the
        // user callback and repaints exposed areas from the back pixmap.
        while let Some(e) = conn.wait_for_event() {
            match e.response_type() as u8 {
                BUTTON_PRESS => {
                    let e: &ButtonPressEvent = unsafe {
                        cast_event(&e)
                    };
                    let (x, y) = (e.event_x(), e.event_y());
                    let b = e.detail();
                    let f = click_fn.lock().unwrap();
                    f(x, y, b);
                }
                EXPOSE => {
                    let e: &ExposeEvent = unsafe {
                        cast_event(&e)
                    };
                    let w = e.width();
                    let h = e.height();
                    let x = e.x() as i16;
                    let y = e.y() as i16;
                    // Repaint only the damaged rectangle.
                    copy_area(&*conn, bufpix, win, gc,
                              x, y, x, y, w, h);
                    conn.flush();
                }
                _ => {}
            }
        }
        // wait_for_event returned None: the connection is gone.
        println!("ERROR");
    });
    return x;
}
/// Ask the server to show the window (calls the free `xcb::map_window`).
fn map_window(&self) {
    map_window(&self.conn, self.win);
}
/// Hide the window (calls the free `xcb::unmap_window`).
fn unmap_window(&self) {
    unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
/// Resolve (or create) the named atom with a round trip to the server.
/// Panics if the reply cannot be obtained.
fn get_atom(&self, name: &str) -> Atom {
    intern_atom(&self.conn, false, name)
        .get_reply()
        .unwrap()
        .atom()
}
/// Look up the screen this instance was created on.
fn get_screen(&self) -> Screen {
    self.conn
        .get_setup()
        .roots()
        .nth(self.scr_num as usize)
        .unwrap()
}
/// Find the `Visualtype` matching the visual id chosen at startup.
/// Panics if the server no longer advertises it.
fn get_visual(&self) -> Visualtype {
    for depth in self.get_screen().allowed_depths() {
        for vis in depth.visuals() {
            if vis.visual_id() != self.visual {
                continue;
            }
            return vis;
        }
    }
    panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
    // Update the pixmap to match new size
    free_pixmap(&self.conn, self.bufpix);
    create_pixmap(&self.conn, self.depth, self.bufpix,
                  self.win, w, h);
    // Clear the new pixmap: with GX_CLEAR the copy writes zeros over the
    // whole destination, regardless of the (stale) source contents.
    change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
    copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
              0, 0, 0, 0, w, h);
    // Restore normal copy semantics for later flushes.
    change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
    // Set the size
    configure_window(&*self.conn, self.win, &[
        (CONFIG_WINDOW_WIDTH as u16, w as u32),
        (CONFIG_WINDOW_HEIGHT as u16, h as u32),
    ]).request_check()
        .unwrap();
    self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
    let values = [
        (CONFIG_WINDOW_X as u16, u32::from(x)),
        (CONFIG_WINDOW_Y as u16, u32::from(y)),
    ];
    configure_window(&self.conn, self.win, &values)
        .request_check()
        .unwrap();
    self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
    /// Build a cairo surface that renders into the back pixmap using the
    /// connection's raw pointer and the visual chosen at startup.
    fn create_surface(&self) -> cairo::Surface {
        // Prepare cairo variables
        let cr_conn = unsafe {
            cairo::XCBConnection::from_raw_none(
                self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
        };
        let cr_draw = cairo::XCBDrawable(self.bufpix);
        let cr_visual = unsafe {
            cairo::XCBVisualType::from_raw_none(
                &mut self.get_visual().base as *mut ffi::xcb_visualtype_t
                as *mut cairo_sys::xcb_visualtype_t)
        };
        // Create the surface using previous variables
        return cairo::Surface::create(
            &cr_conn, &cr_draw, &cr_visual,
            self.size.0 as i32, self.size.1 as i32);
    }
    /// Declare the window as a dock via _NET_WM_WINDOW_TYPE.
    fn dock(&self) {
        let data = [
            self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
        ];
        change_property(&self.conn,
                        PROP_MODE_REPLACE as u8,
                        self.win,
                        self.get_atom("_NET_WM_WINDOW_TYPE"),
                        xcb::ATOM_ATOM,
                        32,
                        &data)
            .request_check()
            .expect("Failed to dock window");
    }
    /// Anchor the bar to the top edge of the screen.
    fn top(&mut self) {
        self.bottom = false;
        self.reposition_window();
    }
    /// Anchor the bar to the bottom edge of the screen.
    fn bottom(&mut self) {
        self.bottom = true;
        self.reposition_window();
    }
    /// Forwards to the inherent `XCB::set_size` (inherent methods take
    /// precedence over trait methods, so this is not recursion).
    fn set_size(&mut self, w: u16, h: u16) {
        self.set_size(w, h);
    }
    /// Move the bar; `y` is measured from the docked edge.
    fn set_offset(&mut self, x: u16, y: u16) {
        if self.bottom {
            let screen_height = self.scr_size.1;
            self.set_pos(x, screen_height - y);
        } else {
            self.set_pos(x, y);
        }
        self.reposition_window();
    }
    /// Full screen dimensions as (width, height) in pixels.
    fn get_screen_size(&self) -> (u16, u16) {
        (self.scr_size.0, self.scr_size.1)
    }
    /// Blit the back pixmap onto the window and flush the connection.
    fn flush(&self) {
        copy_area(&*self.conn, self.bufpix, self.win, self.gc,
                  0, 0, 0, 0, self.size.0, self.size.1);
        self.conn.flush();
    }
    /// Replace the click callback used by the event thread.
    fn click_cb<F>(&mut self, f: F)
        where F: Fn(i16, i16, u8) + Send + Sync + 'static {
        let mut cb = self.click_fn.lock().unwrap();
        *cb = Box::new(f);
    }
}
impl Drop for XCB {
fn drop(&mut self) |
}
| {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
} | identifier_body |
lib.rs | //! jlrs is a crate that provides access to most of the Julia C API, it can be used to embed Julia
//! in Rust applications and to use functionality from the Julia C API when writing `ccall`able
//! functions in Rust. Currently this crate is only tested on Linux in combination with Julia 1.6
//! and is not compatible with earlier versions of Julia.
//!
//! The documentation assumes you have a basic understanding of Julia's type system.
//!
//! # Features
//!
//! An incomplete list of features that are currently supported by jlrs:
//!
//! - Access arbitrary Julia modules and their contents.
//! - Call Julia functions, including functions that take keyword arguments.
//! - Exceptions can be handled or converted to their error message, optionally with color.
//! - Include and call your own Julia code.
//! - Use a custom system image.
//! - Create values that Julia can use, and convert them back to Rust, from Rust.
//! - Access the type information and fields of values. The contents of inline and bits-union
//! fields can be accessed directly.
//! - Create and use n-dimensional arrays. The `jlrs-ndarray` feature can be enabled for
//! integration with ndarray.
//! - Support for mapping Julia structs to Rust structs that can be generated by JlrsReflect.jl.
//! - Structs that can be mapped to Rust include those with type parameters and bits unions.
//! - An async runtime is available when the `async` feature is enabled, which can be used from
//! multiple threads and supports scheduling Julia `Task`s and `await`ing the result without
//! blocking the runtime.
//!
//!
//! # Generating the bindings
//!
//! This crate depends on jl-sys which contains the raw bindings to the Julia C API, by default
//! pregenerated bindings are used. If you want to generate the bindings at compile time, the
//! `use-bindgen` feature must be enabled. In this case the bindings are generated by bindgen. You
//! can find the requirements for using bindgen in [their User Guide]
//!
//! #### Linux
//!
//! The recommended way to install Julia is to download the binaries from the official website,
//! which is distributed in an archive containing a directory called `julia-x.y.z`. This directory
//! contains several other directories, including a `bin` directory containing the `julia`
//! executable.
//!
//! In order to ensure the `julia.h` header file can be found, either `/usr/include/julia/julia.h`
//! or `/usr/local/include/julia/julia.h`
//! must exist, or you have to set the `JULIA_DIR` environment variable to `/path/to/julia-x.y.z`.
//! This environment variable can be used to override the default. Similarly, in order to load
//! `libjulia.so` you must add `/path/to/julia-x.y.z/lib` to the `LD_LIBRARY_PATH` environment
//! variable. When the `uv` feature is enabled, `/path/to/julia-x.y.z/lib/julia` must also be
//! added to `LD_LIBRARY_PATH`.
//!
//! #### Windows
//!
//! If you want to use jlrs on Windows you must use WSL. An installation guide to install WSL on
//! Windows can be found [on Microsoft's website]. After installing a Linux distribution, follow
//! the installation instructions for Linux.
//!
//!
//! # Using this crate
//!
//! The first thing you should do is `use` the [`prelude`]-module with an asterisk, this will
//! bring all the structs and traits you're likely to need into scope. When embedding Julia, it
//! must be initialized before it can be used. You can do this by calling [`Julia::init`] which
//! returns an instance of [`Julia`]. Note that this method can only be called once while the
//! application is running; if you drop it you won't be able to create a new instance but have to
//! restart the application. If you want to use a custom system image, you must call
//! [`Julia::init_with_image`] instead of `Julia::init`. If you're calling Rust from Julia
//! everything has already been initialized, you can use `CCall` instead. If you want to use the
//! async runtime, one of the initialization methods of [`AsyncJulia`] must be used.
//!
//!
//! ## Calling Julia from Rust
//!
//! After initialization you have an instance of [`Julia`], [`Julia::include`] can be used to
//! include files with custom Julia code. In order to call Julia functions and create new values
//! that can be used by these functions, [`Julia::scope`] and [`Julia::scope_with_slots`] must be
//! used. These two methods take a closure with two arguments, a [`Global`] and a mutable
//! reference to a [`GcFrame`]. `Global` is a token that is used to access Julia modules, their
//! contents and other global values, while `GcFrame` is used to root local values. Rooting a
//! value in a frame prevents it from being freed by the garbage collector until that frame has
//! been dropped. The frame is created when `Julia::scope(_with_slots)` is called and dropped
//! when that method returns.
//!
//! Because you can use both a `Global` and a mutable reference to a `GcFrame` inside the closure,
//! it's possible to access the contents of modules and create new values that can be used by
//! Julia. The methods of [`Module`] let you access the contents of arbitrary modules, several
//! methods are available to create new values.
//!
//! The simplest is to call [`Value::eval_string`], a method that takes two arguments. The first
//! must implement the [`Scope`] trait, the second is a string which has to contain valid Julia
//! code. The most important thing to know about the [`Scope`] trait for now is that it's used
//! by functions that create new values to ensure the result is rooted. Mutable references to
//! [`GcFrame`]s implement [`Scope`], in this case the [`Value`] that is returned is rooted in
//! that frame, so the result is protected from garbage collection until the frame is dropped when
//! that scope ends.
//!
//! In practice, [`Value::eval_string`] is relatively limited. It can be used to evaluate simple
//! function calls like `sqrt(2.0)`, but can't take any arguments. Its most important use-case is
//! importing installed packages by evaluating an `import` or `using` statement. A more
//! interesting method, [`Value::new`], can be used with data of any type that implements
//! [`IntoJulia`]. This trait is implemented by primitive types like `i8` and `char`. Any type
//! that implements [`IntoJulia`] also implements [`Unbox`] which is used to extract the contents
//! of a Julia value.
//!
//! In addition to evaluating raw commands with `Value::eval_string`, it's possible to call
//! anything that implements [`Call`] as a Julia function, `Value` implements this trait because
//! any Julia value is potentially callable as a function. Functions can be called with any number
//! of positional arguments and be provided with keyword arguments. Both `Value::eval_string` and
//! the trait methods of `Call` are all unsafe. It's trivial to write a function like
//! `boom() = unsafe_load(Ptr{Float64}(C_NULL))`, which causes a segfault when it's called, and
//! call it with these methods.
//!
//! As a simple example, let's convert two numbers to Julia values and add them:
//!
//! ```no_run
//! use jlrs::prelude::*;
//!
//! # fn main() {
//! // Initializing Julia is unsafe because it can race with another crate that does
//! // the same.
//! let mut julia = unsafe { Julia::init().unwrap() };
//! let res = julia.scope(|global, frame| {
//! // Create the two arguments. Note that the first argument, something that
//! // implements Scope, is taken by value and mutable references don't implement
//! // Copy, so it's necessary to mutably reborrow the frame.
//! let i = Value::new(&mut *frame, 2u64)?;
//! let j = Value::new(&mut *frame, 1u32)?;
//!
//! // The `+` function can be found in the base module.
//! let func = Module::base(global).function(&mut *frame, "+")?;
//!
//! // Call the function and unbox the result as a `u64`. The result of the function
//! // call is a nested `Result`; the outer error doesn't contain to any Julia
//! // data, while the inner error contains the exception if one is thrown. Here the
//! // exception is converted to the outer error type by calling `into_jlrs_result`, this new
//! // error contains the error message Julia would have shown. Colors can be enabled by
//! // calling `Julia::error_color`.
//! unsafe {
//! func.call2(&mut *frame, i, j)?
//! .into_jlrs_result()?
//! .unbox::<u64>()
//! }
//! }).unwrap();
//!
//! assert_eq!(res, 3);
//! # }
//! ```
//!
//! Many more features are available, including creating and accessing n-dimensional Julia arrays
//! and nesting scopes. To learn how to use them, please see the documentation for the [`memory`]
//! and [`wrappers`] modules.
//!
//!
//! ## Calling Rust from Julia
//!
//! Julia's `ccall` interface can be used to call `extern "C"` functions defined in Rust, for most
//! use-cases you shouldn't need jlrs. There are two major ways to use `ccall`, with a pointer to
//! the function or a `(:function, "library")` pair.
//!
//! A function can be cast to a void pointer and converted to a [`Value`]:
//!
//! ```no_run
//! # use jlrs::prelude::*;
//! // This function will be provided to Julia as a pointer, so its name can be mangled.
//! unsafe extern "C" fn call_me(arg: bool) -> isize {
//! if arg {
//! 1
//! } else {
//! -1
//! }
//! }
//!
//! # fn main() {
//! let mut julia = unsafe { Julia::init().unwrap() };
//! julia.scope(|global, frame| unsafe {
//! // Cast the function to a void pointer
//! let call_me_val = Value::new(&mut *frame, call_me as *mut std::ffi::c_void)?;
//!
//! // Value::eval_string can be used to create new functions.
//! let func = Value::eval_string(
//! &mut *frame,
//! "myfunc(callme::Ptr{Cvoid})::Int = ccall(callme, Int, (Bool,), true)"
//! )?.unwrap();
//!
//! // Call the function and unbox the result.
//! let output = func.call1(&mut *frame, call_me_val)?
//! .into_jlrs_result()?
//! .unbox::<isize>()?;
//!
//! assert_eq!(output, 1);
//!
//! Ok(())
//! }).unwrap();
//! # }
//! ```
//!
//! You can also use functions defined in `dylib` and `cdylib` libraries. In order to create such
//! a library you need to add
//!
//! ```toml
//! [lib]
//! crate-type = ["dylib"]
//! ```
//!
//! or
//!
//! ```toml
//! [lib]
//! crate-type = ["cdylib"]
//! ```
//!
//! respectively to your crate's `Cargo.toml`. Use a `dylib` if you want to use the crate in other
//! Rust crates, but if it's only intended to be called through `ccall` a `cdylib` is the better
//! choice. On Linux, compiling such a crate will be compiled to `lib<crate_name>.so`.
//!
//! The functions you want to use with `ccall` must be both `extern "C"` functions to ensure the C
//! ABI is used, and annotated with `#[no_mangle]` to prevent name mangling. Julia can find
//! libraries in directories that are either on the default library search path or included by
//! setting the `LD_LIBRARY_PATH` environment variable on Linux. If the compiled library is not
//! directly visible to Julia, you can open it with `Libdl.dlopen` and acquire function pointers
//! with `Libdl.dlsym`. These pointers can be called the same way as the pointer in the previous
//! example.
//!
//! If the library is visible to Julia you can access it with the library name. If `call_me` is
//! defined in a crate called `foo`, the following should work if the function is annotated with
//! `#[no_mangle]`:
//!
//! ```julia
//! ccall((:call_me, "libfoo"), Int, (Bool,), false)
//! ```
//!
//! One important aspect of calling Rust from other languages in general is that panicking across
//! an FFI boundary is undefined behaviour. If you're not sure your code will never panic, wrap it
//! with `std::panic::catch_unwind`.
//!
//! Most features provided by jlrs including accessing modules, calling functions, and borrowing
//! array data require a [`Global`] or a frame. You can access these by creating a [`CCall`]
//! first. Another method provided by [`CCall`] is [`CCall::uv_async_send`], this method can be
//! used in combination with `Base.AsyncCondition`. In particular, it lets you write a `ccall`able
//! function that does its actual work on another thread, return early and `wait` on the async
//! condition, which happens when [`CCall::uv_async_send`] is called when that work is finished.
//! The advantage of this is that the long-running function will not block the Julia runtime.
//! There's an example available on GitHub that shows how to do this.
//!
//!
//! ## Async runtime
//!
//! The async runtime runs Julia in a separate thread and returns a handle that can be shared
//! across threads. The handle can be used to send new tasks to the runtime, multiple tasks can
//! run in parallel by scheduling a function call as a new Julia `Task`. While the Julia `Task`
//! has not completed, the runtime can switch to another task. To use this feature you must enable
//! the `async` feature flag:
//!
//! ```toml
//! [dependencies]
//! jlrs = { version = "0.12", features = ["async"] }
//! ```
//!
//! The struct [`AsyncJulia`] is exported by the prelude and lets you initialize the runtime in
//! two ways, either as a blocking task or as a thread. The first way should be used if you want
//! to integrate the async runtime into a larger project that uses `async_std`.
//!
//! The easiest way to interact with Julia when using the async runtime is by using
//! `AsyncJulia::blocking_task`, which can be used to send a closure like the one in the first
//! example and call it. While this closure has not completed the runtime is blocked, the methods
//! that schedule a function call as a new Julia `Task` can't be used.
//!
//! In order to write non-blocking tasks, you must implement either the [`AsyncTask`] or
//! [`GeneratorTask`] trait. An `AsyncTask` can be called once, its async `run` method replaces
//! the closure; this method takes a `Global` and a mutable reference [`AsyncGcFrame`]. The
//! `AsyncGcFrame` provides mostly the same functionality as `GcFrame`, but can also be used to
//! call the methods of the [`CallAsync`] trait. These methods schedule the function call on
//! another thread and return a `Future`. While awaiting the result the runtime can handle another
//! task.
//!
//! A `GeneratorTask` can be called multiple times. In addition to `run` it also has an async
//! `init` method. This method is called when the `GeneratorTask` is created and can be used to
//! prepare the initial state of the task. The frame provided to `init` is not dropped after this
//! method returns, which means this initial state can contain Julia data. Whenever a
//! `GeneratorTask` is successfully created a `GeneratorHandle` is returned. This handle can be
//! used to call the `GeneratorTask` which calls its `run` method once. A `GeneratorHandle` can be
//! cloned and shared across threads.
//!
//! You can find basic examples that show how to implement these traits in
//! [the examples directory of the GitHub repository].
//!
//!
//! # Testing
//!
//! The restriction that Julia can be initialized once must be taken into account when running
//! tests that use `jlrs`. The recommended approach is to create a thread-local static `RefCell`:
//!
//! ```no_run
//! use jlrs::prelude::*;
//! use std::cell::RefCell;
//! thread_local! {
//! pub static JULIA: RefCell<Julia> = {
//! let julia = RefCell::new(unsafe { Julia::init().unwrap() });
//! julia.borrow_mut().scope(|_global, _frame| {
//! /* include everything you need to use */
//! Ok(())
//! }).unwrap();
//! julia
//! };
//! }
//! ```
//!
//! Tests that use this construct can only use one thread for testing, so you must use
//! `cargo test -- --test-threads=1`, otherwise the code above will panic when a test
//! tries to call `Julia::init` a second time from another thread.
//!
//! If these tests also involve the async runtime, the `JULIA_NUM_THREADS` environment
//! variable must be set to a value larger than 2.
//!
//! If you want to run jlrs's tests, both these requirements must be taken into account:
//! `JULIA_NUM_THREADS=3 cargo test -- --test-threads=1`
//!
//!
//! # Custom types
//!
//! In order to map a struct in Rust to one in Julia you can derive [`ValidLayout`], [`Unbox`],
//! and [`Typecheck`]. If the struct in Julia has no type parameters and is a bits type you can
//! also derive [`IntoJulia`], which lets you use the type in combination with [`Value::new`].
//!
//! You should normally not need to implement these structs or traits manually. The JlrsReflect.jl
//! package can generate the correct Rust struct and automatically derive the supported traits for
//! types that have no tuple or union fields with type parameters. The reason for this restriction
//! is that the layout of tuple and union fields can be very different depending on these
//! parameters in a way that can't be expressed in Rust.
//!
//! These custom types can also be used when you call Rust from Julia with `ccall`.
//!
//! [their User Guide]: https://rust-lang.github.io/rust-bindgen/requirements.html
//! [on Microsoft's website]: https://docs.microsoft.com/en-us/windows/wsl/install-win10
//! [the examples directory of the GitHub repository]: https://github.com/Taaitaaiger/jlrs/tree/master/examples
//! [`IntoJulia`]: crate::convert::into_julia::IntoJulia
//! [`Typecheck`]: crate::layout::typecheck::Typecheck
//! [`ValidLayout`]: crate::layout::valid_layout::ValidLayout
//! [`Unbox`]: crate::convert::unbox::Unbox
//! [`CallAsync::call_async`]: crate::extensions::multitask::call_async::CallAsync
//! [`AsyncGcFrame`]: crate::extensions::multitask::async_frame::AsyncGcFrame
//! [`Frame`]: crate::memory::frame::Frame
//! [`AsyncTask`]: crate::extensions::multitask::async_task::AsyncTask
//! [`GeneratorTask`]: crate::extensions::multitask::async_task::GeneratorTask
//! [`GeneratorHandle`]: crate::extensions::multitask::async_task::GeneratorHandle
//! [`AsyncJulia`]: crate::extensions::multitask::AsyncJulia
//! [`CallAsync`]: crate::extensions::multitask::call_async::CallAsync
//! [`DataType`]: crate::wrappers::ptr::datatype::DataType
//! [`TypedArray`]: crate::wrappers::ptr::array::TypedArray
//! [`Output`]: crate::memory::output::Output
//! [`OutputScope`]: crate::memory::output::OutputScope
//! [`ScopeExt`]: crate::memory::scope::ScopeExt
//! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope
//! [`Scope`]: crate::memory::scope::Scope
//! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope
//! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope
#![forbid(rustdoc::broken_intra_doc_links)]
pub mod convert;
pub mod error;
pub mod extensions;
pub mod info;
pub mod layout;
pub mod memory;
pub mod prelude;
pub(crate) mod private;
#[doc(hidden)]
pub mod util;
pub mod wrappers;
use convert::into_jlrs_result::IntoJlrsResult;
use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE};
use info::Info;
#[cfg(feature = "uv")]
use jl_sys::uv_async_send;
use jl_sys::{
jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image,
jl_is_initialized,
};
use memory::frame::{GcFrame, NullFrame};
use memory::global::Global;
use memory::mode::Sync;
use memory::stack_page::StackPage;
use prelude::Wrapper;
use private::Private;
use std::ffi::CString;
use std::io::{Error as IOError, ErrorKind};
use std::mem::{self, MaybeUninit};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;
use std::sync::atomic::{AtomicBool, Ordering};
use wrappers::ptr::module::Module;
use wrappers::ptr::string::JuliaString;
use wrappers::ptr::value::Value;
use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _};
pub(crate) static INIT: AtomicBool = AtomicBool::new(false);
pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl");
/// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`]
/// before you can do anything related to Julia. While this struct exists Julia is active,
/// dropping it causes the shutdown code to be called but this doesn't leave Julia in a state from which it can be reinitialized.
pub struct Julia {
    // Reusable stack page backing the GC frames created by `scope` and
    // `scope_with_slots`; regrown on demand when more slots are needed.
    page: StackPage,
}
impl Julia {
/// Initialize Julia, this method can only be called once. If it's called a second time it
/// will return an error. If this struct is dropped, you will need to restart your program to
/// be able to call Julia code again.
///
/// This method is unsafe because it can race with another crate initializing Julia.
pub unsafe fn init() -> JlrsResult<Self> {
    // Guard against double-init: Julia may already be running (possibly
    // started elsewhere), or we may have claimed the init flag before.
    if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) {
        return Err(JlrsError::AlreadyInitialized.into());
    }
    jl_init();
    let mut jl = Julia {
        page: StackPage::default(),
    };
    // Load the bundled jlrs.jl support module; one slot suffices for the
    // eval result.
    jl.scope_with_slots(1, |_, frame| {
        Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
        Ok(())
    })
    .expect("Could not load Jlrs module");
    Ok(jl)
}
/// This method is similar to [`Julia::init`] except that it loads a custom system image. A
/// custom image can be generated with the [`PackageCompiler`] package for Julia. The main
/// advantage of using a custom image over the default one is that it allows you to avoid much
/// of the compilation overhead often associated with Julia.
///
/// Two arguments are required to call this method compared to [`Julia::init`];
/// `julia_bindir` and `image_relative_path`. The first must be the absolute path to a
/// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
    julia_bindir: P,
    image_path: Q,
) -> JlrsResult<Self> {
    // Claim the one-time init flag before touching the runtime.
    if INIT.swap(true, Ordering::SeqCst) {
        Err(JlrsError::AlreadyInitialized)?;
    }
    let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
    let image_path_str = image_path.as_ref().to_string_lossy().to_string();
    // Validate both paths up front and report a NotFound I/O error with
    // the offending path instead of letting jl_init_with_image fail.
    if !julia_bindir.as_ref().exists() {
        let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
        return Err(JlrsError::other(io_err))?;
    }
    if !image_path.as_ref().exists() {
        let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
        return Err(JlrsError::other(io_err))?;
    }
    let bindir = CString::new(julia_bindir_str).unwrap();
    let im_rel_path = CString::new(image_path_str).unwrap();
    jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
    let mut jl = Julia {
        page: StackPage::default(),
    };
    // Load the bundled jlrs.jl support module, as in `init`.
    jl.scope_with_slots(1, |_, frame| {
        Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
        Ok(())
    })
    .expect("Could not load Jlrs module");
    Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
    self.scope(|global, _frame| unsafe {
        let enable = if enable {
            Value::true_v(global)
        } else {
            Value::false_v(global)
        };
        // Flip the flag stored in Main.Jlrs.color by writing its `x`
        // field (presumably a Ref-like holder defined in jlrs.jl).
        Module::main(global)
            .submodule_ref("Jlrs")?
            .wrapper_unchecked()
            .global_ref("color")?
            .value_unchecked()
            .set_field_unchecked("x", enable)?;
        Ok(())
    })?;
    Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
    if path.as_ref().exists() {
        // Two slots: one for the path string, one for the call result.
        return self.scope_with_slots(2, |global, frame| unsafe {
            let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
            let include_func = Module::main(global)
                .function_ref("include")?
                .wrapper_unchecked();
            let res = include_func.call1(frame, path_jl_str)?;
            // An inner Err is a thrown Julia exception; surface its
            // display string in the IncludeError.
            return match res {
                Ok(_) => Ok(()),
                Err(e) => Err(JlrsError::IncludeError {
                    path: path.as_ref().to_string_lossy().into(),
                    msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
                })?,
            };
        });
    }
    Err(JlrsError::IncludeNotFound {
        path: path.as_ref().to_string_lossy().into(),
    })?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
///     let _i = Value::new(&mut *frame, 1u64)?;
///     Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
    for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
    unsafe {
        let global = Global::new();
        // Frame with zero preallocated slots; slots are allocated on
        // demand as values are rooted.
        let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
        func(global, &mut frame)
    }
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results. The frame will preallocate `slots` slots.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope_with_slots(1, |_global, frame| {
/// // Uses the preallocated slot
/// let _i = Value::new(&mut *frame, 1u64)?;
/// // Allocates a new slot, because only a single slot was preallocated
/// let _j = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
if slots + 2 > self.page.size() {
self.page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
    /// Provides access to global information; see [`Info`] for what can be queried.
    pub fn info(&self) -> Info {
        Info::new()
    }
}
impl Drop for Julia {
    fn drop(&mut self) {
        // Run Julia's atexit hooks (with exit code 0) so the runtime shuts down cleanly
        // when the handle is dropped.
        unsafe {
            jl_atexit_hook(0);
        }
    }
}
/// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to
/// initialize it again would cause a crash. In order to still be able to call Julia from Rust
/// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to
/// create a frame first. You can use this struct to do so. It must never be used outside
/// functions called through `ccall`, and only once for each `ccall`ed function.
///
/// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`].
/// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the
/// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at
/// all.
pub struct CCall {
    // Lazily-allocated GC stack page: `None` until a `GcFrame` is first needed, and never
    // allocated at all when only null scopes are used.
    page: Option<StackPage>,
}
impl CCall {
/// Create a new `CCall`. This function must never be called outside a function called through
/// `ccall` from Julia and must only be called once during that call. The stack is not
/// allocated until a [`GcFrame`] is created.
pub unsafe fn new() -> Self {
CCall { page: None }
}
/// Wake the task associated with `handle`. The handle must be the `handle` field of a
/// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from
/// Julia with ccall in another thread and wait for it to complete in Julia without blocking,
/// there's an example available in the repository: ccall_with_threads.
///
/// This method is only available if the `uv` feature is enabled.
#[cfg(feature = "uv")]
pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool {
uv_async_send(handle.cast()) == 0
}
/// Creates a [`GcFrame`], calls the given closure, and returns its result.
pub fn | <T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let page = self.get_init_page();
let global = Global::new();
let mut frame = GcFrame::new(page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// Creates a [`GcFrame`] with `slots` slots, calls the given closure, and returns its result.
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let page = self.get_init_page();
let global = Global::new();
if slots + 2 > page.size() {
*page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
/// Create a [`NullFrame`] and call the given closure. A [`NullFrame`] cannot be nested and
/// can only be used to (mutably) borrow array data. Unlike other scope-methods, no `Global`
/// is provided to the closure.
pub fn null_scope<'base, 'julia: 'base, T, F>(&'julia mut self, func: F) -> JlrsResult<T>
where
F: FnOnce(&mut NullFrame<'base>) -> JlrsResult<T>,
{
unsafe {
let mut frame = NullFrame::new(self);
func(&mut frame)
}
}
#[inline(always)]
fn get_init_page(&mut self) -> &mut StackPage {
if self.page.is_none() {
self.page = Some(StackPage::default());
}
self.page.as_mut().unwrap()
}
}
/// Finalizer for Julia arrays whose backing buffer was allocated by Rust (arrays created by
/// moving a `Vec` into Julia): reconstructs the `Vec` so the memory is freed by Rust's
/// allocator rather than Julia's.
///
/// # Safety
///
/// Must only be installed as a finalizer on arrays whose data was allocated by Rust as a
/// byte `Vec` with length == capacity == `elsize * length`.
unsafe extern "C" fn droparray(a: Array) {
    // The data of a moved array is allocated by Rust, this function is called by
    // a finalizer in order to ensure it's also freed by Rust.
    let mut arr_nn_ptr = a.unwrap_non_null(Private);
    let arr_ref = arr_nn_ptr.as_mut();
    // `how == 2` appears to mark buffers allocated outside Julia's GC pools; anything else is
    // owned by Julia and must not be freed here — TODO confirm against Julia's jl_array flags.
    if arr_ref.flags.how() != 2 {
        return;
    }
    // Set data to null pointer: detach the buffer before freeing it so Julia never observes a
    // dangling pointer through this array.
    let data_ptr = arr_ref.data.cast::<MaybeUninit<u8>>();
    arr_ref.data = null_mut();
    // Set all dims to 0 so the now-empty array reports no elements.
    let arr_ptr = arr_nn_ptr.as_ptr();
    let dims_ptr = jl_array_dims_ptr(arr_ptr);
    let n_dims = jl_array_ndims(arr_ptr);
    let mut_dims_slice = slice::from_raw_parts_mut(dims_ptr, n_dims as _);
    for dim in mut_dims_slice {
        *dim = 0;
    }
    // Drop the data: rebuild the original Vec (length == capacity == total byte size) and drop
    // it, returning the memory to Rust's allocator.
    let n_els = arr_ref.elsize as usize * arr_ref.length;
    let data = Vec::from_raw_parts(data_ptr, n_els, n_els);
    mem::drop(data);
}
| scope | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.