file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
render.py | super(ZoomGroup, self).__init__(parent)
self.zoom = zoom
def set_state(self):
pyglet.gl.glPushMatrix()
pyglet.gl.glScalef(self.zoom, self.zoom, 1)
def unset_state(self):
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.zoom == other.zoom and
self.parent == other.parent
)
def __hash__(self):
return hash((self.zoom, self.parent))
def __repr__(self):
return '%s(zoom=%d)' % (self.__class__.__name__, self.zoom)
class CameraGroup(pyglet.graphics.Group):
def __init__(self, window, zoom_factor, focus=None, parent=None):
super(CameraGroup, self).__init__(parent)
self.window = window
self.zoom_factor = zoom_factor
self.focus = focus
def set_state(self):
if self.focus is not None:
cam_x = self.window.width / 2 - self.focus.x * self.zoom_factor
cam_y = self.window.height / 2 - self.focus.y * self.zoom_factor
pyglet.gl.gl.glPushMatrix()
pyglet.gl.gl.glTranslatef(cam_x, cam_y, 0)
def unset_state(self):
if self.focus is not None:
pyglet.gl.glPopMatrix()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
self.window is other.window and
self.zoom_factor == other.zoom_factor and
self.parent == other.parent
)
def __hash__(self):
return hash((self.window, self.zoom_factor, self.parent))
class Animation(pyglet.event.EventDispatcher):
def __init__(self, duration):
self.elapsed = 0.0
self.duration = duration
pyglet.clock.schedule_interval(self._animate, 0.001)
def cancel(self):
pyglet.clock.unschedule(self._animate)
self.dispatch_event('on_finish', self)
def get_elapsed_ratio(self):
return self.elapsed / self.duration
def _animate(self, dt):
self.elapsed += dt
if self.elapsed > self.duration:
self.cancel()
else:
self.dispatch_event('on_update', self, dt)
Animation.register_event_type('on_update')
Animation.register_event_type('on_finish')
class Renderable(Component):
COMPONENT_NAME = 'renderable'
def __init__(self, image, memorable=False):
self._image = image
self.memorable = memorable
image = event_property('_image', 'image_change')
class | (Component):
COMPONENT_NAME = 'layout_renderable'
def __init__(self, tile):
self.tile = tile
class RenderSystem(object):
zoom = 3
GROUP_LEVEL = pyglet.graphics.OrderedGroup(0)
GROUP_DIGITS = pyglet.graphics.OrderedGroup(1)
GROUP_HUD = pyglet.graphics.OrderedGroup(2)
def __init__(self, level):
self._level = level
self._window = level.game.game.window
self._batch = pyglet.graphics.Batch()
self._animations = set()
self._sprites = {}
self._level_vlist = None
self._light_overlay = None
self._last_messages_view = LastMessagesView(level.game.message_log, self._window.width, self._window.height, batch=self._batch, group=self.GROUP_HUD)
self._hud = HUD(batch=self._batch, group=self.GROUP_HUD)
self._level_group = ZoomGroup(self.zoom, CameraGroup(self._window, self.zoom, self.GROUP_LEVEL))
self._digits_group = CameraGroup(self._window, self.zoom, self.GROUP_DIGITS)
self._memory = collections.defaultdict(list)
def update_player(self):
player_sprite = self._sprites[self._level.player]
self._digits_group.focus = player_sprite
self._level_group.parent.focus = player_sprite
self._hud.player = self._level.player
def render_level(self):
vertices = []
tex_coords = []
for x in xrange(self._level.size_x):
for y in xrange(self._level.size_y):
x1 = x * 8
x2 = x1 + 8
y1 = y * 8
y2 = y1 + 8
for entity in self._level.position_system.get_entities_at(x, y):
renderable = entity.get(LayoutRenderable)
if renderable:
tile = renderable.tile
break
else:
continue
# always add floor, because we wanna draw walls above floor
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(floor_tex.tex_coords)
if tile == LayoutGenerator.TILE_WALL:
# if we got wall, draw it above floor
tex = get_wall_tex(self._level.get_wall_transition(x, y))
vertices.extend((x1, y1, x2, y1, x2, y2, x1, y2))
tex_coords.extend(tex.tex_coords)
group = TextureGroup(dungeon_tex, pyglet.graphics.OrderedGroup(Position.ORDER_FLOOR, self._level_group))
self._level_vlist = self._batch.add(len(vertices) / 2, pyglet.gl.GL_QUADS, group,
('v2i/static', vertices),
('t3f/statc', tex_coords),
)
group = pyglet.graphics.OrderedGroup(Position.ORDER_PLAYER + 1, self._level_group)
self._light_overlay = LightOverlay(self._level.size_x, self._level.size_y, self._batch, group)
def update_light(self, old_lightmap, new_lightmap):
# for all changed cells
for key in set(old_lightmap).union(new_lightmap):
lit = key in new_lightmap
memory = self._memory[key]
# if cell is lit, add it to memory and clear all memory sprites, if there are any
if lit:
for sprite in memory:
sprite.delete()
memory[:] = []
# for every entity in cell
for entity in self._level.position_system.get_entities_at(*key):
# set in_fov flag
# TODO: this doesnt belong to rendering, but i don't want to loop twice
infov = entity.get(InFOV)
if infov:
infov.in_fov = key in new_lightmap
# if renderable, manage sprites/memory
renderable = entity.get(Renderable)
if not renderable:
continue
# if object is lit, show its sprite
sprite = self._sprites[entity]
if lit:
sprite.visible = True
else:
sprite.visible = False
# if it's memorable, add its current image to the memory
if renderable.memorable:
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(renderable.image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
memory.append(sprite)
# update light overlay
self._light_overlay.update_light(new_lightmap, self._memory)
def add_entity(self, entity):
image = entity.get(Renderable).image
pos = entity.get(Position)
group = pyglet.graphics.OrderedGroup(pos.order, self._level_group)
sprite = pyglet.sprite.Sprite(image, pos.x * 8, pos.y * 8, batch=self._batch, group=group)
self._sprites[entity] = sprite
entity.listen('image_change', self._on_image_change)
entity.listen('move', self._on_move)
def remove_entity(self, entity):
sprite = self._sprites.pop(entity)
sprite.delete()
entity.unlisten('image_change', self._on_image_change)
entity.unlisten('move', self._on_move)
def _on_image_change(self, entity):
self._sprites[entity].image = entity.get(Renderable).image
def _on_move(self, entity, old_x, old_y, new_x, new_y):
sprite = self._sprites[entity]
target_x = new_x * 8
target_y = new_y * 8
if not sprite.visible:
# don't animate invisible sprites
sprite.set_position(target_x, target_y)
else:
start_x = sprite.x
start_y = sprite.y
anim = Animation(0.25)
@anim.event
def on_update(animation, dt, sprite=sprite, dx=(target_x - start_x), dy=(target_y - start_y)):
ratio = animation.get_elapsed_ratio()
x = round(start_x + dx * ratio)
y = round(start_y + dy * ratio)
sprite.set_position(x, y)
@anim.event
def on_finish(animation, sprite=sprite):
sprite.set_position(target_x, target_y)
self.add_animation(anim)
def draw(self):
self._window.clear()
pyglet.gl.glEnable(pyglet.gl.GL_BLEND)
pyglet.gl.glBlendFunc(pyglet.gl.GL_SRC_ALPHA, pyglet.gl.GL_ONE_MINUS_SRC_ALPHA)
self | LayoutRenderable | identifier_name |
hashsplit.go | the checksum window,
// then the same window can span multiple chunks,
// meaning a chunk boundary is not independent of the preceding chunk.
// If you leave this set to zero,
// 64 is what you'll get.
// If you really mean "I want no minimum,"
// set this to 1.
MinSize int
// SplitBits is the number of trailing zero bits in the rolling checksum required to produce a chunk.
// The default (what you get if you leave it set to zero) is 13,
// which means a chunk boundary occurs on average once every 8,192 bytes.
//
// (But thanks to math, that doesn't mean that 8,192 is the median chunk size.
// The median chunk size is actually the logarithm, base (2^SplitBits-1)/(2^SplitBits), of 0.5.
// That makes the median chunk size 5,678 when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func | (r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
| Split | identifier_name |
hashsplit.go | s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
}
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 {
return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ {
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return nil, err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = nil // help the gc reclaim memory sooner, maybe
}
}
// Don't necessarily return the highest node in tb.levels.
// We can prune any would-be root nodes that have only one child.
// If we _are_ going to return tb.levels[len(tb.levels)-1],
// we have to call tb.F on it.
// If we're not, we don't:
// tb.F has already been called on all other nodes.
if len(tb.levels) == 1 {
var result Node = tb.levels[0]
if tb.F != nil {
return tb.F(tb.levels[0])
}
return result, nil
}
top := tb.levels[len(tb.levels)-1]
if len(top.Nodes) > 1 {
if tb.F != nil {
return tb.F(top)
}
return top, nil
}
var (
root Node = top
err error
)
for root.NumChildren() == 1 {
root, err = root.Child(0)
if err != nil {
return nil, err
}
}
return root, nil
}
// ErrNotFound is the error returned by Seek when the seek position lies outside the given node's range.
var ErrNotFound = errors.New("not found")
// Seek finds the level-0 node representing the given byte position
// (i.e., the one where Offset <= pos < Offset+Size).
func Seek(n Node, pos uint64) (Node, error) {
if pos < n.Offset() || pos >= (n.Offset()+n.Size()) | {
return nil, ErrNotFound
} | conditional_block | |
hashsplit.go | when SplitBits==13.)
SplitBits uint
// The function to invoke on each chunk produced.
f func([]byte, uint) error
// The chunk being built.
chunk []byte
// This is the recommended rolling-checksum algorithm for hashsplitting
// according to the document at github.com/hashsplit/hashsplit-spec
// (presently in draft form).
rs *buzhash32.Buzhash32
}
// Split hashsplits its input using a default Splitter and the given callback to process chunks.
// See NewSplitter for details about the callback.
func Split(r io.Reader, f func([]byte, uint) error) error {
s := NewSplitter(f)
_, err := io.Copy(s, r)
if err != nil {
return err
}
return s.Close()
}
// NewSplitter produces a new Splitter with the given callback.
// The Splitter is an io.WriteCloser.
// As bytes are written to it,
// it finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error | {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err | identifier_body | |
hashsplit.go | finds chunk boundaries and calls the callback.
//
// The callback receives the bytes of the chunk,
// and the chunk's "level,"
// which is the number of extra trailing zeroes in the rolling checksum
// (in excess of Splitter.SplitBits).
//
// Do not forget to call Close on the Splitter
// to flush any remaining chunk from its internal buffer.
func NewSplitter(f func([]byte, uint) error) *Splitter {
rs := buzhash32.New()
var zeroes [windowSize]byte
rs.Write(zeroes[:]) // initialize the rolling checksum window
return &Splitter{f: f, rs: rs}
}
// Write implements io.Writer.
// It may produce one or more calls to the callback in s,
// as chunks are discovered.
// Any error from the callback will cause Write to return early with that error.
func (s *Splitter) Write(inp []byte) (int, error) {
minSize := s.MinSize
if minSize <= 0 {
minSize = defaultMinSize
}
for i, c := range inp {
s.chunk = append(s.chunk, c)
s.rs.Roll(c)
if len(s.chunk) < minSize {
continue
}
if level, shouldSplit := s.checkSplit(); shouldSplit {
err := s.f(s.chunk, level)
if err != nil {
return i, err
}
s.chunk = nil
}
}
return len(inp), nil
}
// Close implements io.Closer.
// It is necessary to call Close to flush any buffered chunk remaining.
// Calling Close may result in a call to the callback in s.
// It is an error to call Write after a call to Close.
// Close is idempotent:
// it can safely be called multiple times without error
// (and without producing the final chunk multiple times).
func (s *Splitter) Close() error {
if len(s.chunk) == 0 {
return nil
}
level, _ := s.checkSplit()
err := s.f(s.chunk, level)
s.chunk = nil
return err
}
func (s *Splitter) checkSplit() (uint, bool) {
splitBits := s.SplitBits
if splitBits == 0 {
splitBits = defaultSplitBits
}
h := s.rs.Sum32()
tz := uint(bits.TrailingZeros32(h))
if tz >= splitBits {
return tz - splitBits, true
}
return 0, false
}
// Node is the abstract type of a node in a hashsplit tree.
// See TreeBuilder for details.
type Node interface {
// Offset gives the position in the original byte stream that is the first byte represented by this node.
Offset() uint64
// Size gives the number of bytes in the original byte stream that this node represents.
Size() uint64
// NumChildren gives the number of subnodes of this node.
// This is only for interior nodes of the tree (level 1 and higher).
// For leaf nodes (level 0) this must return zero.
NumChildren() int
// Child returns the subnode with the given index from 0 through NumChildren()-1.
Child(int) (Node, error)
}
// TreeBuilderNode is the concrete type implementing the Node interface that is used internally by TreeBuilder.
// Callers may transform this into any other node type during tree construction using the TreeBuilder.F callback.
//
// A interior node ("level 1" and higher) contains one or more subnodes as children.
// A leaf node ("level 0") contains one or more byte slices,
// which are hashsplit chunks of the input.
// Exactly one of Nodes and Chunks is non-empty.
type TreeBuilderNode struct {
// Nodes is the list of subnodes.
// This is empty for leaf nodes (level 0) and non-empty for interior nodes (level 1 and higher).
Nodes []Node
// Chunks is a list of chunks.
// This is non-empty for leaf nodes (level 0) and empty for interior nodes (level 1 and higher).
Chunks [][]byte
size, offset uint64
}
// Offset implements Node.Offset,
// the position of the first byte of the underlying input represented by this node.
func (n *TreeBuilderNode) Offset() uint64 { return n.offset }
// Size implements Node.Size,
// the number of bytes of the underlying input represented by this node.
func (n *TreeBuilderNode) Size() uint64 { return n.size }
// NumChildren implements Node.NumChildren,
// the number of child nodes.
func (n *TreeBuilderNode) NumChildren() int { return len(n.Nodes) }
// Child implements Node.Child.
func (n *TreeBuilderNode) Child(i int) (Node, error) { return n.Nodes[i], nil }
// TreeBuilder assembles a sequence of chunks into a hashsplit tree.
//
// A hashsplit tree provides another level of space-and-bandwidth savings
// over and above what Split gives you.
// Consider, again, the example of adding EXIF tags to a JPEG file.
// Although most chunks of the hashsplitted file will be the same before and after adding tags,
// the _list_ needed to reassemble those chunks into the original file will be very different:
// all the unaffected chunks must shift position to accommodate the new EXIF-containing chunks.
//
// A hashsplit tree organizes that list into a tree instead,
// whose shape is determined by the content of the chunks,
// just as the chunk boundaries are.
// It has the property that only the tree nodes in the vicinity of the change will be affected.
// Most subtrees will remain the same.
//
// Just as each chunk has a level L determined by the rolling checksum
// (see NewSplitter),
// so does each node in the tree have a level, N.
// Tree nodes at level 0 collect chunks at level 0,
// up to and including a chunk at level L>0;
// then a new level-0 node begins.
// Tree nodes at level N>0 collect nodes at level N-1
// up to and including a chunk at level L>N;
// then a new level-N node begins.
type TreeBuilder struct {
// F is an optional function for transforming the TreeBuilder's node representation
// (*TreeBuilderNode)
// into any other type implementing the Node interface.
// This is called on each node as it is completed and added to its parent as a new child.
//
// Callers may wish to perform this transformation when it is not necessary or desirable to keep the full input in memory
// (i.e., the chunks in the leaf nodes),
// such as when the input may be very large.
//
// F is guaranteed to be called exactly once on each node.
//
// If F is nil,
// all nodes in the tree remain *TreeBuilderNode objects.
//
// If this callback return an error,
// the enclosing function -
// Add or Root -
// returns early with that error.
// In that case the TreeBuilder is left in an inconsistent state
// and no further calls to Add or Root are possible.
F func(*TreeBuilderNode) (Node, error)
levels []*TreeBuilderNode
}
// Add adds a new chunk to the TreeBuilder.
// It is typical to call this function in the callback of Split as each chunk is produced.
//
// The level of a chunk is normally the level value passed to the Split callback.
// It results in the creation of a new node at the given level.
// However, this produces a tree with an average branching factor of 2.
// For wider fan-out (more children per node),
// the caller can reduce the value of level.
func (tb *TreeBuilder) Add(bytes []byte, level uint) error {
if len(tb.levels) == 0 {
tb.levels = []*TreeBuilderNode{new(TreeBuilderNode)}
}
tb.levels[0].Chunks = append(tb.levels[0].Chunks, bytes)
for _, n := range tb.levels {
n.size += uint64(len(bytes))
}
for i := uint(0); i < level; i++ {
if i == uint(len(tb.levels))-1 {
tb.levels = append(tb.levels, &TreeBuilderNode{
size: tb.levels[i].size,
})
}
var n Node = tb.levels[i]
if tb.F != nil {
var err error
n, err = tb.F(tb.levels[i])
if err != nil {
return err
}
}
tb.levels[i+1].Nodes = append(tb.levels[i+1].Nodes, n)
tb.levels[i] = &TreeBuilderNode{
offset: tb.levels[i+1].offset + tb.levels[i+1].size,
}
}
return nil
}
// Root produces the root of the tree after all nodes have been added with calls to Add.
// Root may only be called one time.
// If the tree is empty,
// Root returns a nil Node.
// It is an error to call Add after a call to Root.
//
// The return value of Root is the interface type Node.
// If tb.F is nil, the concrete type will be *TreeBuilderNode.
func (tb *TreeBuilder) Root() (Node, error) {
if len(tb.levels) == 0 { | return nil, nil
}
if len(tb.levels[0].Chunks) > 0 {
for i := 0; i < len(tb.levels)-1; i++ { | random_line_split | |
bigrand.rs | uint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn | <R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! | sample | identifier_name |
bigrand.rs | _zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint | {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit, | identifier_body | |
bigrand.rs | uint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint;
/// Generate a random `BigInt` within the given range. The lower
/// bound is inclusive; the upper bound is exclusive. Fails when
/// the upper bound is not greater than the lower bound.
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt;
}
impl<R: Rng + ?Sized> RandBigInt for R {
fn gen_biguint(&mut self, bit_size: usize) -> BigUint {
use super::big_digit::BITS;
let (digits, rem) = bit_size.div_rem(&BITS);
let mut data = smallvec![BigDigit::default(); digits + (rem > 0) as usize];
// `fill` is faster than many `gen::<u32>` calls
// Internally this calls `SeedableRng` where implementors are responsible for adjusting endianness for reproducable values.
self.fill(data.as_mut_slice());
if rem > 0 {
data[digits] >>= BITS - rem;
}
BigUint::new_native(data)
}
fn gen_bigint(&mut self, bit_size: usize) -> BigInt {
loop {
// Generate a random BigUint...
let biguint = self.gen_biguint(bit_size);
// ...and then randomly assign it a Sign...
let sign = if biguint.is_zero() {
// ...except that if the BigUint is zero, we need to try
// again with probability 0.5. This is because otherwise,
// the probability of generating a zero BigInt would be
// double that of any other number.
if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are | lazy_static! {
| /// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")] | random_line_split |
bigrand.rs | if self.gen() {
continue;
} else {
NoSign
}
} else if self.gen() {
Plus
} else {
Minus
};
return BigInt::from_biguint(sign, biguint);
}
}
fn gen_biguint_below(&mut self, bound: &BigUint) -> BigUint {
assert!(!bound.is_zero());
let bits = bound.bits();
loop {
let n = self.gen_biguint(bits);
if n < *bound {
return n;
}
}
}
fn gen_biguint_range(&mut self, lbound: &BigUint, ubound: &BigUint) -> BigUint {
assert!(*lbound < *ubound);
if lbound.is_zero() {
self.gen_biguint_below(ubound)
} else {
lbound + self.gen_biguint_below(&(ubound - lbound))
}
}
fn gen_bigint_range(&mut self, lbound: &BigInt, ubound: &BigInt) -> BigInt {
assert!(*lbound < *ubound);
if lbound.is_zero() {
BigInt::from(self.gen_biguint_below(magnitude(&ubound)))
} else if ubound.is_zero() {
lbound + BigInt::from(self.gen_biguint_below(magnitude(&lbound)))
} else {
let delta = ubound - lbound;
lbound + BigInt::from(self.gen_biguint_below(magnitude(&delta)))
}
}
}
/// The back-end implementing rand's `UniformSampler` for `BigUint`.
#[derive(Clone, Debug)]
pub struct UniformBigUint {
base: BigUint,
len: BigUint,
}
impl UniformSampler for UniformBigUint {
type X = BigUint;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigUint {
len: high - low,
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
Self::new(low_b, high_b.borrow() + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + rng.gen_biguint_below(&self.len)
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_biguint_range(low, high)
}
}
impl SampleUniform for BigUint {
type Sampler = UniformBigUint;
}
/// The back-end implementing rand's `UniformSampler` for `BigInt`.
#[derive(Clone, Debug)]
pub struct UniformBigInt {
base: BigInt,
len: BigUint,
}
impl UniformSampler for UniformBigInt {
type X = BigInt;
#[inline]
fn new<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low < high);
UniformBigInt {
len: into_magnitude(high - low),
base: low.clone(),
}
}
#[inline]
fn new_inclusive<B1, B2>(low_b: B1, high_b: B2) -> Self
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
assert!(low <= high);
Self::new(low, high + 1u32)
}
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Self::X {
&self.base + BigInt::from(rng.gen_biguint_below(&self.len))
}
#[inline]
fn sample_single<R: Rng + ?Sized, B1, B2>(low_b: B1, high_b: B2, rng: &mut R) -> Self::X
where
B1: SampleBorrow<Self::X> + Sized,
B2: SampleBorrow<Self::X> + Sized,
{
let low = low_b.borrow();
let high = high_b.borrow();
rng.gen_bigint_range(low, high)
}
}
impl SampleUniform for BigInt {
type Sampler = UniformBigInt;
}
/// A random distribution for `BigUint` and `BigInt` values of a particular bit size.
#[derive(Clone, Copy, Debug)]
pub struct RandomBits {
bits: usize,
}
impl RandomBits {
#[inline]
pub fn new(bits: usize) -> RandomBits {
RandomBits { bits }
}
}
impl Distribution<BigUint> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigUint {
rng.gen_biguint(self.bits)
}
}
impl Distribution<BigInt> for RandomBits {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BigInt {
rng.gen_bigint(self.bits)
}
}
/// A generic trait for generating random primes.
///
/// *Warning*: This is highly dependend on the provided random number generator,
/// to provide actually random primes.
///
/// # Example
#[cfg_attr(feature = "std", doc = " ```")]
#[cfg_attr(not(feature = "std"), doc = " ```ignore")]
/// extern crate rand;
/// extern crate num_bigint_dig as num_bigint;
///
/// use rand::thread_rng;
/// use num_bigint::RandPrime;
///
/// let mut rng = thread_rng();
/// let p = rng.gen_prime(1024);
/// assert_eq!(p.bits(), 1024);
/// ```
///
#[cfg(feature = "prime")]
pub trait RandPrime {
/// Generate a random prime number with as many bits as given.
fn gen_prime(&mut self, bits: usize) -> BigUint;
}
/// A list of small, prime numbers that allows us to rapidly
/// exclude some fraction of composite candidates when searching for a random
/// prime. This list is truncated at the point where smallPrimesProduct exceeds
/// a u64. It does not include two because we ensure that the candidates are
/// odd by construction.
#[cfg(feature = "prime")]
const SMALL_PRIMES: [u8; 15] = [3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53];
#[cfg(feature = "prime")]
lazy_static! {
/// The product of the values in SMALL_PRIMES and allows us
/// to reduce a candidate prime by this number and then determine whether it's
/// coprime to all the elements of SMALL_PRIMES without further BigUint
/// operations.
static ref SMALL_PRIMES_PRODUCT: BigUint = BigUint::from_u64(16_294_579_238_595_022_365).unwrap();
}
#[cfg(feature = "prime")]
impl<R: Rng + ?Sized> RandPrime for R {
fn gen_prime(&mut self, bit_size: usize) -> BigUint {
if bit_size < 2 {
panic!("prime size must be at least 2-bit");
}
let mut b = bit_size % 8;
if b == 0 {
b = 8;
}
let bytes_len = (bit_size + 7) / 8;
let mut bytes = vec![0u8; bytes_len];
loop {
self.fill_bytes(&mut bytes);
// Clear bits in the first byte to make sure the candidate has a size <= bits.
bytes[0] &= ((1u32 << (b as u32)) - 1) as u8;
// Don't let the value be too small, i.e, set the most significant two bits.
// Setting the top two bits, rather than just the top bit,
// means that when two of these values are multiplied together,
// the result isn't ever one bit short.
if b >= 2 | {
bytes[0] |= 3u8.wrapping_shl(b as u32 - 2);
} | conditional_block | |
cartracker.py | # create background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2()
# where the centroids will be stored
pts = deque(maxlen=args["buffer"])
counter = 0
(dX, dY) = (0, 0)
direction = ""
#setting variables before the image processing
frames_count, fps, width, height = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
width = int(width)
height = int(height)
print(frames_count, fps, width, height)
# creates a pandas data frame with the number of rows the same length as frame count
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frames"
framenumber = 0 # keeps track of current frame
carids = [] # blank list to add car ids
totalcars = 0 # keeps track of total cars
#capturing data
while(True):
# Capture two frames
ret, frame1 = cap.read() # first image
time.sleep(1/25) # slight delay
ret, frame2 = cap.read() # second image
image = cv2.resize(frame1, (0, 0), None, 1,1)
#getting the difference as the basis for movement
diff = cv2.absdiff(frame1,frame2)
mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
th =25
imask = mask > th
canvas = np.zeros_like(frame2, np.uint8)
canvas[imask] = frame1[imask]
mask = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
#canvas = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# transforms
fgmask = fgbg.apply(mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30,30))
#dilation = cv2.dilate(fgmask, kernel)
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_ERODE, kernel)
mask =closing
# variable for contours
ret,thresh = cv2.threshold(mask,0,255,0)
# creates contours/blobs
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
# draw contours
cv2.drawContours(mask, hull, -1, (0, 255, 0), 2)
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
# line created to stop counting contours, needed as cars in distance become one big contour
lineypos = 400
cv2.line(image, (-100, lineypos), (width, -120), (255, 0, 0), 3) # blue
lineypos2 = -700
cv2.line(image, (-150, lineypos2), (width, 700), (0, 255, 0), 3) # green
cv2.line(image, (-150, -100), (width, 1800), (255, 255,0), 3)
#creating centroids and boxes
for j in range(len(contours)):
if hierarchy[0, j, 3] == -1:
cnt=contours[j]
area = cv2.contourArea(cnt)
if 500 < area < 50000:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
#getting variables for the centroids
cx = int(x + w/2)
cy = int(y + h/2)
cen = (cx,cy)
cv2.circle(image, (cx,cy), 7, (255,0,0), -1)
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.5, (0, 0, 255), 1)
cxx[j] = cx
cyy[j] = cy
pts.appendleft(cen)
#this is for plotting the past centroid positions
for i in np.arange(1, len(pts)):
# if either of the tracked points are None, ignore
# them
if pts[i - 1] is None or pts[i] is None:
continue
# draw the centroid tracker
cv2.circle(image, (pts[i - 1]), 2, (0,0,255), -1)
#drawing hte current centroid
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
minx_index2 = []
miny_index2 = []
maxrad = 30
# if there are centroids in the specified area
if len(cxx):
if not carids: # if carids is empty
# loops through all centroids
for i in range(len(cxx)):
carids.append(i)
df[str(carids[i])] = ""
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1
else:
dx = np.zeros((len(cxx), len(carids)))
dy = np.zeros((len(cyy), len(carids)))
for i in range(len(cxx)):
for j in range(len(carids)):
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
# if centroid is not in the minindex list then another car needs to be added
if i not in minx_index2 and miny_index2:
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curc | #arguments to start with
ap.add_argument("-b", "--buffer", type=int, default=5000,
help="max buffer size")
args = vars(ap.parse_args())
| random_line_split | |
cartracker.py | # creates a pandas data frame with the number of rows the same length as frame count
df = pd.DataFrame(index=range(int(frames_count)))
df.index.name = "Frames"
framenumber = 0 # keeps track of current frame
carids = [] # blank list to add car ids
totalcars = 0 # keeps track of total cars
#capturing data
while(True):
# Capture two frames
ret, frame1 = cap.read() # first image
time.sleep(1/25) # slight delay
ret, frame2 = cap.read() # second image
image = cv2.resize(frame1, (0, 0), None, 1,1)
#getting the difference as the basis for movement
diff = cv2.absdiff(frame1,frame2)
mask = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
th =25
imask = mask > th
canvas = np.zeros_like(frame2, np.uint8)
canvas[imask] = frame1[imask]
mask = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
#canvas = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# transforms
fgmask = fgbg.apply(mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (30,30))
#dilation = cv2.dilate(fgmask, kernel)
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
#opening = cv2.morphologyEx(closing, cv2.MORPH_ERODE, kernel)
mask =closing
# variable for contours
ret,thresh = cv2.threshold(mask,0,255,0)
# creates contours/blobs
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
# draw contours
cv2.drawContours(mask, hull, -1, (0, 255, 0), 2)
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
# line created to stop counting contours, needed as cars in distance become one big contour
lineypos = 400
cv2.line(image, (-100, lineypos), (width, -120), (255, 0, 0), 3) # blue
lineypos2 = -700
cv2.line(image, (-150, lineypos2), (width, 700), (0, 255, 0), 3) # green
cv2.line(image, (-150, -100), (width, 1800), (255, 255,0), 3)
#creating centroids and boxes
for j in range(len(contours)):
if hierarchy[0, j, 3] == -1:
cnt=contours[j]
area = cv2.contourArea(cnt)
if 500 < area < 50000:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
#getting variables for the centroids
cx = int(x + w/2)
cy = int(y + h/2)
cen = (cx,cy)
cv2.circle(image, (cx,cy), 7, (255,0,0), -1)
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.5, (0, 0, 255), 1)
cxx[j] = cx
cyy[j] = cy
pts.appendleft(cen)
#this is for plotting the past centroid positions
for i in np.arange(1, len(pts)):
# if either of the tracked points are None, ignore
# them
|
#drawing hte current centroid
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
minx_index2 = []
miny_index2 = []
maxrad = 30
# if there are centroids in the specified area
if len(cxx):
if not carids: # if carids is empty
# loops through all centroids
for i in range(len(cxx)):
carids.append(i)
df[str(carids[i])] = ""
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1
else:
dx = np.zeros((len(cxx), len(carids)))
dy = np.zeros((len(cyy), len(carids)))
for i in range(len(cxx)):
for j in range(len(carids)):
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
# if centroid is not in the minindex list then another car needs to be added
if i not in minx_index2 and miny_index2:
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# checks if current centroid exists but previous centroid does not
# new car to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
# The section below labels the centroids on screen
currentcars = 0 # current cars on screen | if pts[i - 1] is None or pts[i] is None:
continue
# draw the centroid tracker
cv2.circle(image, (pts[i - 1]), 2, (0,0,255), -1) | conditional_block |
util.go | {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template
session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr
}
// Get the current template - for each version we find in the version listing
// we look up the actual CF resource and copy it into this template
existingStackDefinition, existingStackDefinitionErr := existingStackTemplate(serviceName,
session,
logger)
if nil != existingStackDefinitionErr | {
return nil, existingStackDefinitionErr
} | conditional_block | |
util.go | (data map[string]interface{}) (*gocf.StringExpr, error) {
if len(data) <= 0 {
return nil, fmt.Errorf("FnJoinExpr data is empty")
}
for eachKey, eachValue := range data {
switch eachKey {
case "Ref":
return gocf.Ref(eachValue.(string)).String(), nil
case "Fn::GetAtt":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 2 {
return nil, fmt.Errorf("Invalid params for Fn::GetAtt: %s", eachValue)
}
return gocf.GetAtt(attrValues[0], attrValues[1]).String(), nil
case "Fn::FindInMap":
attrValues, attrValuesErr := toExpressionSlice(eachValue)
if nil != attrValuesErr {
return nil, attrValuesErr
}
if len(attrValues) != 3 {
return nil, fmt.Errorf("Invalid params for Fn::FindInMap: %s", eachValue)
}
return gocf.FindInMap(attrValues[0], gocf.String(attrValues[1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode | parseFnJoinExpr | identifier_name | |
util.go | 1]), gocf.String(attrValues[2])), nil
}
}
return nil, fmt.Errorf("Unsupported AWS Function detected: %#v", data)
}
////////////////////////////////////////////////////////////////////////////////
// Public
////////////////////////////////////////////////////////////////////////////////
// S3AllKeysArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) for all bucket keys (`/*`). The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3AllKeysArnForBucket(bucket interface{}) *gocf.StringExpr {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
arnParts = append(arnParts, gocf.String("/*"))
return gocf.Join("", arnParts...).String()
}
// S3ArnForBucket returns a CloudFormation-compatible Arn expression
// (string or Ref) suitable for template reference. The bucket
// parameter may be either a string or an interface{} ("Ref: "myResource")
// value
func S3ArnForBucket(bucket interface{}) *gocf.StringExpr |
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}). | {
arnParts := []gocf.Stringable{gocf.String("arn:aws:s3:::")}
switch bucket.(type) {
case string:
// Don't be smart if the Arn value is a user supplied literal
arnParts = append(arnParts, gocf.String(bucket.(string)))
case *gocf.StringExpr:
arnParts = append(arnParts, bucket.(*gocf.StringExpr))
case gocf.RefFunc:
arnParts = append(arnParts, bucket.(gocf.RefFunc).String())
default:
panic(fmt.Sprintf("Unsupported SourceArn value type: %+v", bucket))
}
return gocf.Join("", arnParts...).String()
} | identifier_body |
util.go | : %+v", bucket))
}
return gocf.Join("", arnParts...).String()
}
// MapToResourceTags transforms a go map[string]string to a CloudFormation-compliant
// Tags representation. See http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
func MapToResourceTags(tagMap map[string]string) []interface{} {
var tags []interface{}
for eachKey, eachValue := range tagMap {
tags = append(tags, map[string]interface{}{
"Key": eachKey,
"Value": eachValue,
})
}
return tags
}
// Struct to encapsulate transforming data into
type templateConverter struct {
templateReader io.Reader
additionalTemplateProps map[string]interface{}
// internals
expandedTemplate string
contents []gocf.Stringable
conversionError error
}
func (converter *templateConverter) expandTemplate() *templateConverter {
if nil != converter.conversionError {
return converter
}
templateDataBytes, templateDataErr := ioutil.ReadAll(converter.templateReader)
if nil != templateDataErr {
converter.conversionError = templateDataErr
return converter
}
templateData := string(templateDataBytes)
parsedTemplate, templateErr := template.New("CloudFormation").Parse(templateData)
if nil != templateErr {
converter.conversionError = templateDataErr
return converter
}
output := &bytes.Buffer{}
executeErr := parsedTemplate.Execute(output, converter.additionalTemplateProps)
if nil != executeErr {
converter.conversionError = executeErr
return converter
}
converter.expandedTemplate = output.String()
return converter
}
func (converter *templateConverter) parseData() *templateConverter {
if converter.conversionError != nil {
return converter
}
reAWSProp := regexp.MustCompile("\\{\\s*\"([Ref|Fn\\:\\:\\w+])")
splitData := strings.Split(converter.expandedTemplate, "\n")
splitDataLineCount := len(splitData)
for eachLineIndex, eachLine := range splitData {
curContents := eachLine
for len(curContents) != 0 {
matchInfo := reAWSProp.FindStringSubmatchIndex(curContents)
if nil != matchInfo {
// If there's anything at the head, push it.
if matchInfo[0] != 0 {
head := curContents[0:matchInfo[0]]
converter.contents = append(converter.contents, gocf.String(fmt.Sprintf("%s", head)))
curContents = curContents[len(head):]
}
// There's at least one match...find the closing brace...
var parsed map[string]interface{}
for indexPos, eachChar := range curContents {
if string(eachChar) == "}" {
testBlock := curContents[0 : indexPos+1]
err := json.Unmarshal([]byte(testBlock), &parsed)
if err == nil {
parsedContents, parsedContentsErr := parseFnJoinExpr(parsed)
if nil != parsedContentsErr {
converter.conversionError = parsedContentsErr
return converter
}
converter.contents = append(converter.contents, parsedContents)
curContents = curContents[indexPos+1:]
if len(curContents) <= 0 && (eachLineIndex < (splitDataLineCount - 1)) {
converter.contents = append(converter.contents, gocf.String("\n"))
}
break
}
}
}
if nil == parsed {
// We never did find the end...
converter.conversionError = fmt.Errorf("Invalid CloudFormation JSON expression on line: %s", eachLine)
return converter
}
} else {
// No match, just include it iff there is another line afterwards
newlineValue := ""
if eachLineIndex < (splitDataLineCount - 1) {
newlineValue = "\n"
}
// Always include a newline at a minimum
appendLine := fmt.Sprintf("%s%s", curContents, newlineValue)
if len(appendLine) != 0 {
converter.contents = append(converter.contents, gocf.String(appendLine))
}
break
}
}
}
return converter
}
func (converter *templateConverter) results() (*gocf.StringExpr, error) {
if nil != converter.conversionError {
return nil, converter.conversionError
}
return gocf.Join("", converter.contents...), nil
}
// ConvertToTemplateExpression transforms the templateData contents into
// an Fn::Join- compatible representation for template serialization.
// The templateData contents may include both golang text/template properties
// and single-line JSON Fn::Join supported serializations.
func ConvertToTemplateExpression(templateData io.Reader, additionalUserTemplateProperties map[string]interface{}) (*gocf.StringExpr, error) {
converter := &templateConverter{
templateReader: templateData,
additionalTemplateProps: additionalUserTemplateProperties,
}
return converter.expandTemplate().parseData().results()
}
func existingStackTemplate(serviceName string,
session *session.Session,
logger *logrus.Logger) (*gocf.Template, error) {
template, templateExists := cloudFormationStackTemplateMap[serviceName]
if !templateExists {
templateParams := &cloudformation.GetTemplateInput{
StackName: aws.String(serviceName),
}
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Info("Fetching existing CloudFormation template")
cloudformationSvc := cloudformation.New(session)
rawTemplate, rawTemplateErr := cloudformationSvc.GetTemplate(templateParams)
if nil != rawTemplateErr {
if strings.Contains(rawTemplateErr.Error(), "does not exist") {
template = nil
} else {
return nil, rawTemplateErr
}
} else {
t := gocf.Template{}
jsonDecodeErr := json.NewDecoder(strings.NewReader(*rawTemplate.TemplateBody)).Decode(&t)
if nil != jsonDecodeErr {
return nil, jsonDecodeErr
}
template = &t
}
cloudFormationStackTemplateMap[serviceName] = template
} else {
logger.WithFields(logrus.Fields{
"Service": serviceName,
}).Debug("Using cached CloudFormation Template resources")
}
return template, nil
}
func existingLambdaResourceVersions(serviceName string,
lambdaResourceName string,
session *session.Session,
logger *logrus.Logger) (*lambda.ListVersionsByFunctionOutput, error) {
errorIsNotExist := func(apiError error) bool {
return apiError != nil && strings.Contains(apiError.Error(), "does not exist")
}
logger.WithFields(logrus.Fields{
"ResourceName": lambdaResourceName,
}).Info("Fetching existing function versions")
cloudFormationSvc := cloudformation.New(session)
describeParams := &cloudformation.DescribeStackResourceInput{
StackName: aws.String(serviceName),
LogicalResourceId: aws.String(lambdaResourceName),
}
describeResponse, describeResponseErr := cloudFormationSvc.DescribeStackResource(describeParams)
logger.WithFields(logrus.Fields{
"Response": describeResponse,
"ResponseErr": describeResponseErr,
}).Debug("Describe response")
if errorIsNotExist(describeResponseErr) {
return nil, nil
} else if describeResponseErr != nil {
return nil, describeResponseErr
}
listVersionsParams := &lambda.ListVersionsByFunctionInput{
FunctionName: describeResponse.StackResourceDetail.PhysicalResourceId,
MaxItems: aws.Int64(128),
}
lambdaSvc := lambda.New(session)
listVersionsResp, listVersionsRespErr := lambdaSvc.ListVersionsByFunction(listVersionsParams)
if errorIsNotExist(listVersionsRespErr) {
return nil, nil
} else if listVersionsRespErr != nil {
return nil, listVersionsRespErr
}
logger.WithFields(logrus.Fields{
"Response": listVersionsResp,
"ResponseErr": listVersionsRespErr,
}).Debug("ListVersionsByFunction")
return listVersionsResp, nil
}
// AutoIncrementingLambdaVersionInfo is dynamically populated during
// a call AddAutoIncrementingLambdaVersionResource. The VersionHistory
// is a map of published versions to their CloudFormation resource names
type AutoIncrementingLambdaVersionInfo struct {
// The version that will be published as part of this operation
CurrentVersion int
// The CloudFormation resource name that defines the
// AWS::Lambda::Version resource to be included with this operation
CurrentVersionResourceName string
// The version history that maps a published version value
// to its CloudFormation resource name. Used for defining lagging
// indicator Alias values
VersionHistory map[int]string
}
// AddAutoIncrementingLambdaVersionResource inserts a new
// AWS::Lambda::Version resource into the template. It uses
// the existing CloudFormation template representation
// to determine the version index to append. The returned
// map is from `versionIndex`->`CloudFormationResourceName`
// to support second-order AWS::Lambda::Alias records on a
// per-version level
func AddAutoIncrementingLambdaVersionResource(serviceName string,
lambdaResourceName string,
cfTemplate *gocf.Template,
logger *logrus.Logger) (*AutoIncrementingLambdaVersionInfo, error) {
// Get the template | session, sessionErr := session.NewSession()
if sessionErr != nil {
return nil, sessionErr | random_line_split | |
gravity.js | function | ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
| Plugin | identifier_name |
gravity.js | function Plugin ( element, options ) | Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
| {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic. | identifier_body |
gravity.js | function Plugin ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is
empty so that we don't alter the default options for future
instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top) | else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
| {
group.data.padding = {
top: delta.top - group.data.offset.top
};
} | conditional_block |
gravity.js |
function Plugin ( element, options ) {
/*
Provide local access to the DOM node(s) that called the plugin,
as well local access to the plugin name and default options.
*/
this.element = element;
this._name = pluginName;
this._defaults = $.fn.gravity.defaults;
/*
The "$.extend" method merges the contents of two or more objects,
and stores the result in the first object. The first object is | instances of the plugin.
More: http://api.jquery.com/jquery.extend/
*/
this.options = $.extend( {}, this._defaults, options );
/*
The "init" method is the starting point for all plugin logic.
Calling the init method here in the "Plugin" constructor function
allows us to store all methods (including the init method) in the
plugin's prototype. Storing methods required by the plugin in its
prototype lowers the memory footprint, as each instance of the
plugin does not need to duplicate all of the same methods. Rather,
each instance can inherit the methods from the constructor
function's prototype.
*/
this.init();
}
// Avoid Plugin.prototype conflicts
$.extend(Plugin.prototype, {
// Initialization logic
init: function () {
/*
Create additional methods below and call them via
"this.myFunction(arg1, arg2)", ie: "this.buildCache();".
Note, you can cccess the DOM node(s), plugin name, default
plugin options and custom plugin options for a each instance
of the plugin by using the variables "this.element",
"this._name", "this._defaults" and "this.options" created in
the "Plugin" constructor function (as shown in the buildCache
method below).
*/
this.setGravitation();
this.buildCache();
this.calcGravity();
// this.bindEvents();
},
// Remove plugin instance completely
destroy: function() {
/*
The destroy method unbinds all events for the specific instance
of the plugin, then removes all plugin data that was stored in
the plugin instance using jQuery's .removeData method.
Since we store data for each instance of the plugin in its
instantiating element using the $.data method (as explained
in the plugin wrapper below), we can call methods directly on
the instance outside of the plugin initalization, ie:
$('selector').data('plugin_myPluginName').someOtherFunction();
Consequently, the destroy method can be called using:
$('selector').data('plugin_myPluginName').destroy();
*/
this.unbindEvents();
this.$element.removeData();
},
// Add default gravitation force
setGravitation: function () {
var $root = $(this.element);
$.each(this.options.gravitation, function(index,node){
$root.append('<div class="gravitation-node" gravity-id="'+node.name+'"></div>').find('.gravitation-node[gravity-id="'+node.name+'"]').css({
top: node.top,
left: node.left
});
});
},
// Cache DOM nodes for performance
buildCache: function () {
/*
Create variable(s) that can be accessed by other plugin
functions. For example, "this.$element = $(this.element);"
will cache a jQuery reference to the elementthat initialized
the plugin. Cached variables can then be used in other methods.
*/
this.$element = $(this.element);
this.$element.addClass('gravity-init');
var options = this.options;
var parents = 0;
this.$element.find('.gravity').each(function(index,element){
var parent = $(element).parent();
parent.addClass('gravity-parent');
$(element).attr('gravity-id',index);
if(parent.attr('gravity-id')===undefined){
parent.attr('gravity-id',parents);
options.elements.push({
parent: '.gravity-parent[gravity-id='+parents+']',
children: []
});
if(parents>0) parents += 1;
}
options.elements[parents].children.push({ id: '.gravity[gravity-id='+index+']' });
});
this.options = options;
},
calcGravity: function () {
var options = this.options;
// store position data
$.each(this.options.elements, function(index,group){
group.data = {
aWidth: $(group.parent).outerWidth(),
aHeight: $(group.parent).outerHeight(),
gWidth: 0,
gHeight: 0,
offset: {
top: $(group.parent).offset().top,
left: $(group.parent).offset().left
}
};
group.data.gravity = {
top: $('.gravity-init .gravitation-node').offset().top,
left: $('.gravity-init .gravitation-node').offset().left
}
$.each(group.children, function(index,child){
//var top, right, bottom, left;
child.data = {
width: $(child.id).outerWidth(),
height: $(child.id).outerHeight()
}
child.data.force = (child.data.height*options.k);
child.data.center = {
top: $(child.id).offset().top+(child.data.height/2),
left: $(child.id).offset().left+(child.data.width/2)
}
child.data.margin = {
top: parseInt($(child.id).css('margin-top'))/options.density*child.data.force,
right: parseInt($(child.id).css('margin-right'))/options.density*child.data.force,
bottom: parseInt($(child.id).css('margin-bottom'))/options.density*child.data.force,
left: parseInt($(child.id).css('margin-left'))/options.density*child.data.force
};
group.data.gWidth += child.data.margin.left + child.data.width + child.data.margin.right;
group.data.gHeight += child.data.margin.top + child.data.height + child.data.margin.bottom;
});
// console.log(group.data.gHeight);
var delta = 0;
group.data.center = {
top: $(group.parent).offset().top+(group.data.gHeight/2),
left: $(group.parent).offset().left+(group.data.gWidth/2)
};
delta = {
top: group.data.gravity.top - group.data.center.top,
left: group.data.gravity.left - group.data.center.left
}
// calc vertical force
if(group.data.gHeight>=group.data.aHeight){
group.data.padding = {
top: 0
};
}else if(group.data.gHeight<=group.data.aHeight-delta.top){
group.data.padding = {
top: delta.top - group.data.offset.top
};
}else{
group.data.padding = {
top: group.data.aHeight - group.data.gHeight
};
}
$(group.parent).css({
'padding-top': group.data.padding.top
});
// apply to DOM
$.each(group.children, function(index,child){
var m = $(child.id).outerHeight()*options.k;
// calc text alignment force
if($(child.id).css('text-align')=='start'){
if(child.data.center.left<group.data.gravity.left/2){
$(child.id).css('text-align','left');
}else if(group.data.gravity.left<(child.data.center.left*3)/2){
$(child.id).css('text-align','center');
}else{
$(child.id).css('text-align','right');
}
}
// apply deafult margin
$(child.id).css({
'margin-top': child.data.margin.top+'px',
'margin-bottom': child.data.margin.bottom+'px',
'margin-left': child.data.margin.left+'px',
'margin-right': child.data.margin.right+'px'
});
// if margin exceeds available container, reduce
if(child.height+(m*2)>group.data.aHeight){
margin = (group.data.aHeight-child.height)/2;
$(child.id).css({
'margin-top': margin+'px',
'margin-bottom': margin+'px'
});
}
// if margin to bounds is less apply default
// if($(child.id).offset().left<m){
// $(child.id).css({
// 'margin-left': m+'px'
// });
// }
});
});
},
// Bind events that trigger methods
bindEvents: function() {
var plugin = this;
/*
Bind event(s) to handlers that trigger other functions, ie:
"plugin.$element.on('click', function() {});". Note the use of
the cached variable we created in the buildCache method.
All events are namespaced, ie:
".on('click'+'.'+this._name', function() {});".
This allows us to unbind plugin-specific events using the
unbindEvents method below.
*/
plugin.$element.on('click'+'.'+plugin._name, function() {
/*
Use the "call" method so that inside of the method being
called, ie: "someOtherFunction", the "this" keyword refers
to the plugin instance, not the event handler.
More: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/call
*/
plugin.someOtherFunction.call(plugin);
});
| empty so that we don't alter the default options for future | random_line_split |
lsfiles.go | = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip | {
continue
} | conditional_block | |
lsfiles.go | recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files {
indexPath, err := file.IndexPath(c)
if err != nil {
return nil, err
}
if _, ok := filesInIndex[indexPath]; !ok {
return nil, fmt.Errorf("error: pathspec '%v' did not match any file(s) known to git", file)
}
}
}
if opt.Others {
wd := File(c.WorkDir)
ignorePatterns := []IgnorePattern{}
if opt.ExcludeStandard {
opt.ExcludeFiles = append(opt.ExcludeFiles, File(filepath.Join(c.GitDir.String(), "info/exclude")))
opt.ExcludePerDirectory = append(opt.ExcludePerDirectory, ".gitignore")
}
for _, file := range opt.ExcludeFiles {
patterns, err := ParseIgnorePatterns(c, file, "")
if err != nil {
return nil, err
}
ignorePatterns = append(ignorePatterns, patterns...)
}
for _, pattern := range opt.ExcludePatterns {
ignorePatterns = append(ignorePatterns, IgnorePattern{Pattern: pattern, Source: "", LineNum: 1, Scope: ""})
}
others := findUntrackedFilesFromDir(c, opt, wd+"/", wd, wd, filesInIndex, !opt.Directory, ignorePatterns)
for _, file := range others {
f, err := file.PathName.FilePath(c)
if err != nil {
return nil, err
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
}
if skip {
continue
}
}
fs = append(fs, LsFilesResult{file, '?'})
}
}
sort.Sort(lsByPath(fs))
return fs, nil
}
// Implement the sort interface on *GitIndexEntry, so that
// it's easy to sort by name.
type lsByPath []LsFilesResult
func (g lsByPath) | Len | identifier_name | |
lsfiles.go | }
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range | {
files, err := ioutil.ReadDir(dir.String())
if err != nil {
return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
} | identifier_body | |
lsfiles.go | return nil
}
for _, ignorefile := range opts.ExcludePerDirectory {
ignoreInDir := ignorefile
if dir != "" {
ignoreInDir = dir + "/" + ignorefile
}
if ignoreInDir.Exists() {
log.Println("Adding excludes from", ignoreInDir)
patterns, err := ParseIgnorePatterns(c, ignoreInDir, dir)
if err != nil {
continue
}
ignorePatterns = append(ignorePatterns, patterns...)
}
}
files:
for _, fi := range files {
fname := File(fi.Name())
if fi.Name() == ".git" {
continue
}
for _, pattern := range ignorePatterns {
var name File
if parent == "" {
name = fname
} else {
name = parent + "/" + fname
}
if pattern.Matches(name.String(), fi.IsDir()) {
continue files
}
}
if fi.IsDir() {
if !recursedir {
// This isn't very efficient, but lets us implement git ls-files --directory
// without too many changes.
indexPath, err := (parent + "/" + fname).IndexPath(c)
if err != nil {
panic(err)
}
dirHasTracked := false
for path := range tracked {
if strings.HasPrefix(path.String(), indexPath.String()) {
dirHasTracked = true
break
}
}
if !dirHasTracked {
if opts.Directory {
if opts.NoEmptyDirectory {
if files, err := ioutil.ReadDir(fname.String()); len(files) == 0 && err == nil {
continue
}
}
indexPath += "/"
}
untracked = append(untracked, &IndexEntry{PathName: indexPath})
continue
}
}
var newparent, newdir File
if parent == "" {
newparent = fname
} else {
newparent = parent + "/" + fname
}
if dir == "" {
newdir = fname
} else {
newdir = dir + "/" + fname
}
recurseFiles := findUntrackedFilesFromDir(c, opts, root, newparent, newdir, tracked, recursedir, ignorePatterns)
untracked = append(untracked, recurseFiles...)
} else {
var filePath File
if parent == "" {
filePath = File(strings.TrimPrefix(fname.String(), root.String()))
} else {
filePath = File(strings.TrimPrefix((parent + "/" + fname).String(), root.String()))
}
indexPath, err := filePath.IndexPath(c)
if err != nil {
panic(err)
}
indexPath = IndexPath(filePath)
if _, ok := tracked[indexPath]; !ok {
untracked = append(untracked, &IndexEntry{PathName: indexPath})
}
}
}
return
}
// Describes the options that may be specified on the command line for
// "git diff-index". Note that only raw mode is currently supported, even
// though all the other options are parsed/set in this struct.
type LsFilesOptions struct {
// Types of files to show
Cached, Deleted, Modified, Others bool
// Invert exclusion logic
Ignored bool
// Show stage status instead of just file name
Stage bool
// Show files which are unmerged. Implies Stage.
Unmerged bool
// Show files which need to be removed for checkout-index to succeed
Killed bool
// If a directory is classified as "other", show only its name, not
// its contents
Directory bool
// Do not show empty directories with --others
NoEmptyDirectory bool
// Exclude standard patterns (ie. .gitignore and .git/info/exclude)
ExcludeStandard bool
// Exclude using the provided patterns
ExcludePatterns []string
// Exclude using the provided file with the patterns
ExcludeFiles []File
// Exclude using additional patterns from each directory
ExcludePerDirectory []File
ErrorUnmatch bool
// Equivalent to the -t option to git ls-files
Status bool
}
type LsFilesResult struct {
*IndexEntry
StatusCode rune
}
// LsFiles implements the git ls-files command. It returns an array of files
// that match the options passed.
func LsFiles(c *Client, opt LsFilesOptions, files []File) ([]LsFilesResult, error) {
var fs []LsFilesResult
index, err := c.GitDir.ReadIndex()
if err != nil {
return nil, err
}
// We need to keep track of what's in the index if --others is passed.
// Keep a map instead of doing an O(n) search every time.
var filesInIndex map[IndexPath]bool
if opt.Others || opt.ErrorUnmatch {
filesInIndex = make(map[IndexPath]bool)
}
for _, entry := range index.Objects {
f, err := entry.PathName.FilePath(c)
if err != nil {
return nil, err
}
if opt.Killed {
// We go through each parent to check if it exists on the filesystem
// until we find a directory (which means there's no more files getting
// in the way of os.MkdirAll from succeeding in CheckoutIndex)
pathparent := filepath.Clean(path.Dir(f.String()))
for pathparent != "" && pathparent != "." {
f := File(pathparent)
if f.IsDir() {
// We found a directory, so there's nothing
// getting in the way
break
} else if f.Exists() {
// It's not a directory but it exists,
// so we need to delete it
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
// check the next level of the directory path
pathparent, _ = filepath.Split(filepath.Clean(pathparent))
}
if f.IsDir() {
indexPath, err := f.IndexPath(c)
if err != nil {
return nil, err
}
fs = append(fs, LsFilesResult{
&IndexEntry{PathName: indexPath},
'K',
})
}
}
if opt.Others || opt.ErrorUnmatch {
filesInIndex[entry.PathName] = true
}
if strings.HasPrefix(f.String(), "../") || len(files) > 0 {
skip := true
for _, explicit := range files {
eAbs, err := filepath.Abs(explicit.String())
if err != nil {
return nil, err
}
fAbs, err := filepath.Abs(f.String())
if err != nil {
return nil, err
}
if fAbs == eAbs || strings.HasPrefix(fAbs, eAbs+"/") {
skip = false
break
}
if f.MatchGlob(explicit.String()) {
skip = false
break
}
}
if skip {
continue
}
}
if opt.Cached {
if entry.SkipWorktree() {
fs = append(fs, LsFilesResult{entry, 'S'})
} else {
fs = append(fs, LsFilesResult{entry, 'H'})
}
continue
}
if opt.Deleted {
if !f.Exists() {
fs = append(fs, LsFilesResult{entry, 'R'})
continue
}
}
if opt.Unmerged && entry.Stage() != Stage0 {
fs = append(fs, LsFilesResult{entry, 'M'})
continue
}
if opt.Modified {
if f.IsDir() {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// If we couldn't stat it, we assume it was deleted and
// is therefore modified. (It could be because the file
// was deleted, or it could be bcause a parent directory
// was deleted and we couldn't stat it. The latter means
// that os.IsNotExist(err) can't be used to check if it
// really was deleted, so for now we just assume.)
if _, err := f.Stat(); err != nil {
fs = append(fs, LsFilesResult{entry, 'C'})
continue
}
// We've done everything we can to avoid hashing the file, but now
// we need to to avoid the case where someone changes a file, then
// changes it back to the original contents
hash, _, err := HashFile("blob", f.String())
if err != nil {
return nil, err
}
if hash != entry.Sha1 {
fs = append(fs, LsFilesResult{entry, 'C'})
}
}
}
if opt.ErrorUnmatch {
for _, file := range files | files, err := ioutil.ReadDir(dir.String())
if err != nil { | random_line_split | |
plot_all_sampled_days.py | # with centroid and angle displayed
import mpl_toolkits.basemap as bm
from mpl_toolkits.basemap import Basemap, cm
from netCDF4 import Dataset as NetCDFFile
import numpy as np
import matplotlib.pyplot as plt
import sys,os
cwd=os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd+'/../../MetBot')
sys.path.append(cwd+'/../../RTools')
sys.path.append(cwd+'/../')
import PlotTools as pt
import MetBot.dset_dict as dsetdict
import dsets_paper_28_4plot as dset_mp
import MetBot.dimensions_dict as dim_exdict
import MetBot.mytools as my
import MetBot.mynetcdf as mync
import MetBot.SynopticAnatomy as sy
import MetBot.MetBlobs as blb
import time as tm
import datetime
### Running options
size='20'
globv='olr'
postrmm=False
sub='SA'
from_event='first'
sample='blon'
type='cont'
if type == 'cont':
jj = 0
if sample == 'blon':
best_lon = [33,58]
ndays=[50,50]
n_cen = [-22,-22]
s_cen = [-32,-32]
t_ang = [-60,-50]
b_ang = [-25,-15]
f_seas = [11,11]
l_seas = [3,3]
# How many plots do you want?
if size=='20':
nplot=int(size)
xplots=4
yplots=5
### Get directories
bkdir=cwd+"/../../../CTdata/"
botdir=bkdir+"metbot_multi_dset/"
thisdir=bkdir+"hpaperplot/plot_all_sampled_days"
my.mkdir_p(thisdir)
### Multi dset?
dsets='spec' # "all" or "spec" to choose specific dset(s)
if dsets=='all':
ndset=len(dset_mp.dset_deets)
dsetnames=list(dset_mp.dset_deets)
elif dsets=='spec': # edit for the dset you want
#ndset=1
#dsetnames=['ncep']
ndset=1
dsetnames=['cmip5']
ndstr=str(ndset)
for d in range(ndset):
dset=dsetnames[d]
dcnt=str(d+1)
print 'Running on '+dset
print 'This is dset '+dcnt+' of '+ndstr+' in list'
outdir=thisdir+'/'+dset+'/'
my.mkdir_p(outdir)
### Multi model?
mods = 'spec' # "all" or "spec" to choose specific model(s)
if mods == 'all':
nmod = len(dset_mp.dset_deets[dset])
mnames = list(dset_mp.dset_deets[dset])
if mods == 'spec': # edit for the models you want
nmod = 1
mnames = ['ACCESS1-0']
#nmod=5
#mnames=['ACCESS1-0','bcc-csm1-1-m','CanESM2','GFDL-CM3','MIROC-ESM']
nmstr = str(nmod)
for m in range(nmod):
name = mnames[m]
mcnt = str(m + 1)
print 'Running on ' + name
print 'This is model ' + mcnt + ' of ' + nmstr + ' in list'
# Get info
moddct = dsetdict.dset_deets[dset][name]
vnamedict = globv + 'name'
varstr = moddct[vnamedict]
ys = moddct['yrfname']
dimdict = dim_exdict.dim_deets[globv][dset]
latname = dimdict[1]
lonname = dimdict[2]
# Open olr file
olrfile=botdir+dset+'/'+name+'.'+globv+\
'.day.mean.'+ys+'.nc'
print 'Opening '+olrfile
ncout = mync.open_multi(olrfile, globv, name, \
dataset=dset, subs=sub)
ndim = len(ncout)
if ndim == 5:
olrdata, time, lat, lon, dtime = ncout
elif ndim == 6:
olrdata, time, lat, lon, lev, dtime = ncout
olrdata = np.squeeze(olrdata)
else:
print 'Check number of dims in ncfile'
dtime[:, 3] = 0
# Select dates with TTCBs only
threshtxt = botdir + 'thresholds.fmin.all_dset.txt'
print threshtxt
with open(threshtxt) as f:
for line in f:
if dset + '\t' + name in line:
thresh = line.split()[2]
print 'thresh=' + str(thresh)
thresh = int(thresh)
thisthresh = thresh
thre_str = str(int(thisthresh))
### Open synop file
sydir=botdir+dset+'/'+name+'/'
sysuf=sydir+name+'_'
mbsfile = sysuf + thre_str + '_' + dset + "-olr-0-0.mbs"
refmbs, refmbt, refch = blb.mbopen(mbsfile)
refmbt[:,3]=0
if from_event == 'first':
syfile = sysuf + thre_str + '_' + dset + '-OLR.synop'
s = sy.SynopticEvents((), [syfile], COL=False)
ks = s.events.keys()
ks.sort()
refkey = s.mbskeys[0]
count_all = str(int(len(ks)))
print "Total CBs flagged =" + str(count_all)
ev_dts = []
ev_keys = []
ev_cXs = []
for k in ks:
e = s.events[k]
dts = s.blobs[refkey]['mbt'][e.ixflags]
for dt in range(len(dts)):
x, y = e.trkcX[dt], e.trkcY[dt]
ev_dts.append(dts[dt])
ev_keys.append(k)
ev_cXs.append(x)
ev_dts = np.asarray(ev_dts)
ev_dts[:, 3] = 0
ev_keys = np.asarray(ev_keys)
ev_cXs = np.asarray(ev_cXs)
### Get array of centroids and angles
edts = []
cXs = []
cYs = []
degs = []
mons = []
for b in range(len(refmbt)):
date = refmbt[b]
mon = int(date[1])
cX = refmbs[b, 3]
cY = refmbs[b, 4]
deg = refmbs[b, 2]
if from_event == 'all':
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
elif from_event == 'first':
# print 'Checking if the date is the first day of an event'
ix = my.ixdtimes(ev_dts, [date[0]], \
[date[1]], [date[2]], [0])
if len(ix) == 1:
key = ev_keys[ix]
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
elif len(ix) > 1:
# print 'there is more than one event on this day'
# print 'lets find the centroid that matches'
todays_cXs = ev_cXs[ix]
index2 = np.where(todays_cXs == cX)[0]
if len(index2) != 1:
print 'Error - centroid not matching'
index3 = ix[index2]
key = ev_keys[index3]
# print 'selecting event with matching centroid'
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
# print 'but is it the first date?'
if dts[0, 0] == date[0]:
if dts[ | # To plot a maps for flagged CB days
# after selection based on angle and centroid
#
# plotted over a larger domain | random_line_split | |
plot_all_sampled_days.py |
elif dsets=='spec': # edit for the dset you want
#ndset=1
#dsetnames=['ncep']
ndset=1
dsetnames=['cmip5']
ndstr=str(ndset)
for d in range(ndset):
dset=dsetnames[d]
dcnt=str(d+1)
print 'Running on '+dset
print 'This is dset '+dcnt+' of '+ndstr+' in list'
outdir=thisdir+'/'+dset+'/'
my.mkdir_p(outdir)
### Multi model?
mods = 'spec' # "all" or "spec" to choose specific model(s)
if mods == 'all':
nmod = len(dset_mp.dset_deets[dset])
mnames = list(dset_mp.dset_deets[dset])
if mods == 'spec': # edit for the models you want
nmod = 1
mnames = ['ACCESS1-0']
#nmod=5
#mnames=['ACCESS1-0','bcc-csm1-1-m','CanESM2','GFDL-CM3','MIROC-ESM']
nmstr = str(nmod)
for m in range(nmod):
name = mnames[m]
mcnt = str(m + 1)
print 'Running on ' + name
print 'This is model ' + mcnt + ' of ' + nmstr + ' in list'
# Get info
moddct = dsetdict.dset_deets[dset][name]
vnamedict = globv + 'name'
varstr = moddct[vnamedict]
ys = moddct['yrfname']
dimdict = dim_exdict.dim_deets[globv][dset]
latname = dimdict[1]
lonname = dimdict[2]
# Open olr file
olrfile=botdir+dset+'/'+name+'.'+globv+\
'.day.mean.'+ys+'.nc'
print 'Opening '+olrfile
ncout = mync.open_multi(olrfile, globv, name, \
dataset=dset, subs=sub)
ndim = len(ncout)
if ndim == 5:
olrdata, time, lat, lon, dtime = ncout
elif ndim == 6:
olrdata, time, lat, lon, lev, dtime = ncout
olrdata = np.squeeze(olrdata)
else:
print 'Check number of dims in ncfile'
dtime[:, 3] = 0
# Select dates with TTCBs only
threshtxt = botdir + 'thresholds.fmin.all_dset.txt'
print threshtxt
with open(threshtxt) as f:
for line in f:
if dset + '\t' + name in line:
thresh = line.split()[2]
print 'thresh=' + str(thresh)
thresh = int(thresh)
thisthresh = thresh
thre_str = str(int(thisthresh))
### Open synop file
sydir=botdir+dset+'/'+name+'/'
sysuf=sydir+name+'_'
mbsfile = sysuf + thre_str + '_' + dset + "-olr-0-0.mbs"
refmbs, refmbt, refch = blb.mbopen(mbsfile)
refmbt[:,3]=0
if from_event == 'first':
syfile = sysuf + thre_str + '_' + dset + '-OLR.synop'
s = sy.SynopticEvents((), [syfile], COL=False)
ks = s.events.keys()
ks.sort()
refkey = s.mbskeys[0]
count_all = str(int(len(ks)))
print "Total CBs flagged =" + str(count_all)
ev_dts = []
ev_keys = []
ev_cXs = []
for k in ks:
e = s.events[k]
dts = s.blobs[refkey]['mbt'][e.ixflags]
for dt in range(len(dts)):
x, y = e.trkcX[dt], e.trkcY[dt]
ev_dts.append(dts[dt])
ev_keys.append(k)
ev_cXs.append(x)
ev_dts = np.asarray(ev_dts)
ev_dts[:, 3] = 0
ev_keys = np.asarray(ev_keys)
ev_cXs = np.asarray(ev_cXs)
### Get array of centroids and angles
edts = []
cXs = []
cYs = []
degs = []
mons = []
for b in range(len(refmbt)):
date = refmbt[b]
mon = int(date[1])
cX = refmbs[b, 3]
cY = refmbs[b, 4]
deg = refmbs[b, 2]
if from_event == 'all':
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
elif from_event == 'first':
# print 'Checking if the date is the first day of an event'
ix = my.ixdtimes(ev_dts, [date[0]], \
[date[1]], [date[2]], [0])
if len(ix) == 1:
key = ev_keys[ix]
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
elif len(ix) > 1:
# print 'there is more than one event on this day'
# print 'lets find the centroid that matches'
todays_cXs = ev_cXs[ix]
index2 = np.where(todays_cXs == cX)[0]
if len(index2) != 1:
print 'Error - centroid not matching'
index3 = ix[index2]
key = ev_keys[index3]
# print 'selecting event with matching centroid'
e = s.events[key[0]]
dts = s.blobs[refkey]['mbt'][e.ixflags]
# print 'but is it the first date?'
if dts[0, 0] == date[0]:
if dts[0, 1] == date[1]:
if dts[0, 2] == date[2]:
# print 'it is the first date, so we keep it'
edts.append(date)
cXs.append(cX)
cYs.append(cY)
degs.append(deg)
mons.append(mon)
# else:
# print 'this is not the first day... ignore'
# else:
# print 'this is not the first day... ignore'
edts = np.asarray(edts)
edts[:, 3] = 0
cXs = np.asarray(cXs)
cYs = np.asarray(cYs)
degs = np.asarray(degs)
mons = np.asarray(mons)
# Select the dates for 50 closest
### Loop flagged days and select those with certain angle and centroid
print 'looping flagged days to find good centroids and angles'
tmp_edts = []
if sample == 'blon' or sample == 'blon2':
tmp_cXs = []
tmp_cYs = []
tmp_degs = []
tmp_mons = []
for b in range(len(edts)):
date = edts[b]
mon = mons[b]
cX = cXs[b]
cY = cYs[b]
deg = degs[b]
# Check on the month
if mon >= f_seas[jj] or mon <= l_seas[jj]:
if sample == 'blon' or sample == 'blon2':
# Check on the latitude of centroid
if cY > s_cen[jj] and cY < n_cen[jj]:
# Check on the angle
if deg > t_ang[jj] and deg < b_ang[jj]:
tmp_edts.append(date)
tmp_cXs.append(cX)
tmp_cYs.append(cY)
tmp_degs.append(deg)
tmp_mons.append(mon)
tmp_edts = np.asarray(tmp_edts)
tmp_edts[:, 3] = 0
if sample == 'blon' or | ndset=len(dset_mp.dset_deets)
dsetnames=list(dset_mp.dset_deets) | conditional_block | |
lib.rs | //!
//! assert_eq!(get_symbol_description(&BAR).unwrap(), "the second symbol!");
//! ```
//!
//! For static symbols, the implementations of [Eq]/[Ord]/[Hash](std::hash::Hash) et. al use only the namespace's [type_id](std::any::Any::type_id)
//! plus the symbol's numerical `id`.
//!
//! Typically, the boilerplate code for a static namespace will be generated by macros or `build.rs`.
//!
//! # Dynamic symbols
//! Sometimes the values that a symbol can take are not known upfront. In this case we have to resort to memory allocation.
//! Dynamic symbols implement a different namespace trait: [namespace::Dynamic]. The type that implements this trait also
//! functions as the symbol _instance_ itself:
//!
//! ```
//! use dyn_symbol::*;
//!
//! // This symbol is string-based:
//! struct DynamicNS(String);
//!
//! impl namespace::Dynamic for DynamicNS {
//! fn namespace_name(&self) -> &str {
//! "dynamic"
//! }
//!
//! fn symbol_name(&self) -> &str {
//! &self.0
//! }
//!
//! fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
//! Box::new(DynamicNS(self.0.clone()))
//! }
//!
//! /// Note: calling code should already have verified that these are indeed the same types, using `type_id`.
//! fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
//! self.0 == rhs.downcast_ref::<DynamicNS>().unwrap().0
//! }
//!
//! fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
//! self.0.cmp(&rhs.downcast_ref::<DynamicNS>().unwrap().0)
//! }
//!
//! fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
//! // we are now in `dyn` land, so the [std::hash::Hash] trait cannot be used:
//! state.write(self.0.as_bytes());
//! state.write_u8(0xff)
//! }
//! }
//!
//! let foo0 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let foo1 = Symbol::Dynamic(Box::new(DynamicNS("foo".into())));
//! let bar = Symbol::Dynamic(Box::new(DynamicNS("bar".into())));
//!
//! assert_eq!(foo0, foo1);
//! assert_eq!(foo0.clone(), foo1.clone());
//! assert_ne!(foo0, bar);
//! ```
//!
//! It is entirely up to the Dynamic implementation to consider what kind of symbols are considered equal.
//! The `Eq`/`Hash` symmetry need to hold, though.
//!
//! Dynamic symbols are supported as a companion to static symbols. If your application works mainly with dynamic symbols,
//! you should consider using a different keying mechanism, because of the inherent overhead/indirection/boxing of dynamic symbols.
//!
//! # Type system
//! This crate makes use of [Any](std::any::Any), and consideres namespaces sharing the same [TypeId](std::any::TypeId) to be the _same namespace_.
//! This could make code reuse a bit cumbersome. If one crate exports multiple namespaces, this can be solved by using const generics:
//!
//! ```
//! struct ReusableNamespace<const N: u8>;
//!
//! // impl<const N: u8> namespace::Static for MyNamespace<N> { ... }
//!
//! const NS_1: ReusableNamespace<1> = ReusableNamespace;
//! const NS_2: ReusableNamespace<2> = ReusableNamespace;
//!
//! // assert_ne!(NS_1.type_id(), NS_2.type_id());
//! ```
//!
//! This will cause the two namespaces to have differing `type_id`s.
//!
//!
use std::cmp::Ordering;
///
/// A symbol, with support for mixed static/dynamic allocation.
///
pub enum Symbol {
/// Construct a Symbol originating from a static namespace.
/// The first parameter is a trait object pointing back to the namespace,
/// the second parameter is the symbol `id` within that namespace.
Static(&'static dyn namespace::Static, u32),
/// Construct a Symbol with dynamic origins. Dynamic namespaces are unbounded in size,
/// so a memory allocation is needed. This encoding allows dynamic namespaces to support
/// the same semantics that static namespaces do. Instead of just using a [String], we
/// can also encode what kind of string it is.
Dynamic(Box<dyn namespace::Dynamic>),
}
impl Symbol {
pub fn name(&self) -> &str {
match self {
Self::Static(ns, id) => ns.symbol_name(*id),
Self::Dynamic(instance) => instance.symbol_name(),
}
}
///
/// Get access to the associated namespace's `Any` representation.
/// its `type_id` may be used as a reflection tool to get to know about the Symbol's origin.
///
pub fn as_any(&self) -> &dyn std::any::Any {
match self {
Self::Static(ns, _) => ns.as_any(),
Self::Dynamic(instance) => instance.as_any(),
}
}
///
/// Try to downcast this Symbol's originating _static namespace_ to a concrete `&T`,
/// and if successful, return that concrete namespace along with the symbol's static id.
///
pub fn downcast_static<T: 'static>(&self) -> Option<(&T, u32)> {
match self {
Self::Static(ns, id) => ns.as_any().downcast_ref::<T>().map(|t| (t, *id)),
Self::Dynamic(_) => None,
}
}
///
/// Try to downcast this Symbol's _dynamic namespace_ as a `&T`.
///
/// Always fails for static namespaces.
///
pub fn downcast_dyn<T: 'static>(&self) -> Option<&T> {
match self {
Self::Static(_, _) => None,
Self::Dynamic(instance) => instance.as_any().downcast_ref::<T>(),
}
}
}
impl Clone for Symbol {
fn clone(&self) -> Self {
match self {
Self::Static(static_symbol, id) => Self::Static(*static_symbol, *id),
Self::Dynamic(instance) => Self::Dynamic(instance.dyn_clone()),
}
}
}
impl std::fmt::Debug for Symbol {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self { | f,
"{}::{}",
instance.namespace_name(),
instance.symbol_name()
)
}
}
}
}
impl PartialEq for Symbol {
fn eq(&self, rhs: &Symbol) -> bool {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// | Self::Static(ns, id) => {
write!(f, "{}::{}", ns.namespace_name(), ns.symbol_name(*id))
}
Self::Dynamic(instance) => {
write!( | random_line_split |
lib.rs | {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// it's more than two words because it needs to encode the A/B enum value.
// on 64-bit arch it should be 3 words, because it contains an `u32` too,
// and that should be encoded within the same machine word as the enum discriminant..
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn test_equality() | {
let test_state = TestState::new();
test_state.assert_full_eq(&STATIC_A_0, &STATIC_A_0);
test_state.assert_full_eq(&STATIC_A_1, &STATIC_A_1);
test_state.assert_full_eq(&STATIC_B_0, &STATIC_B_0);
test_state.assert_full_ne(&STATIC_A_0, &STATIC_A_1);
test_state.assert_full_ne(&STATIC_A_1, &STATIC_B_0);
test_state.assert_full_eq(&dynamic::sym0("foo"), &dynamic::sym0("foo"));
} | identifier_body | |
lib.rs | (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
*this_id == *rhs_id && this_ns.type_id() == rhs_ns.type_id()
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
this.type_id() == rhs.type_id() && this.dyn_eq(rhs.as_ref())
}
_ => false,
}
}
}
impl Eq for Symbol {}
impl Ord for Symbol {
fn cmp(&self, rhs: &Symbol) -> Ordering {
match (self, rhs) {
(Self::Static(this_ns, this_id), Self::Static(rhs_ns, rhs_id)) => {
let this_type_id = this_ns.type_id();
let rhs_type_id = rhs_ns.type_id();
if this_type_id == rhs_type_id {
this_id.cmp(&rhs_id)
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Dynamic(this), Self::Dynamic(rhs)) => {
let this_type_id = this.type_id();
let rhs_type_id = rhs.type_id();
if this_type_id == rhs_type_id {
this.dyn_cmp(rhs.as_ref())
} else {
this_type_id.cmp(&rhs_type_id)
}
}
(Self::Static(_, _), Self::Dynamic(_)) => Ordering::Less,
(Self::Dynamic(_), Self::Static(_, _)) => Ordering::Greater,
}
}
}
impl PartialOrd for Symbol {
fn partial_cmp(&self, other: &Symbol) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl std::hash::Hash for Symbol {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Static(ns, id) => {
ns.type_id().hash(state);
state.write_u32(*id)
}
Self::Dynamic(dynamic_sym) => {
dynamic_sym.type_id().hash(state);
dynamic_sym.dyn_hash(state)
}
}
}
}
pub mod namespace {
//!
//! Namespace traits that must be implemented by symbol providers.
//!
use downcast_rs::*;
///
/// A static namespace. Symbols in a static namespace are identified with an `id` encoded as a `u32`.
///
pub trait Static: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// A symbol's name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self, id: u32) -> &str;
}
///
/// A dynamic namespace. A dynamic symbol instance is tied to `Self`.
///
pub trait Dynamic: Send + Sync + Downcast {
///
/// The namespace's name, used for [Debug][std::fmt::Debug].
///
fn namespace_name(&self) -> &str;
///
/// The symbol name, used for [Debug][std::fmt::Debug].
///
fn symbol_name(&self) -> &str;
///
/// Clone this dynamic symbol. Must return a new symbol instance that is `eq` to `&self`.
///
fn dyn_clone(&self) -> Box<dyn Dynamic>;
///
/// Dynamic [eq](std::cmp::PartialEq::eq). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_eq(&self, rhs: &dyn Dynamic) -> bool;
///
/// Dynamic [cmp](std::cmp::Ord::cmp). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_cmp(&self, rhs: &dyn Dynamic) -> std::cmp::Ordering;
///
/// Dynamic [hash](std::hash::Hash::hash). `rhs` can be unconditionally downcasted to `Self`.
///
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher);
}
impl_downcast!(Dynamic);
}
#[cfg(test)]
mod tests {
use super::*;
use std::hash::{BuildHasher, Hash, Hasher};
mod _static {
use super::*;
pub struct ClassN<const N: u8> {
class_name: &'static str,
names: &'static [&'static str],
}
impl<const N: u8> namespace::Static for ClassN<N> {
fn namespace_name(&self) -> &str {
self.class_name
}
fn symbol_name(&self, id: u32) -> &str {
self.names[id as usize]
}
}
pub const STATIC_NS_CLASS_A: ClassN<1> = ClassN {
class_name: "A",
names: &["0", "1"],
};
pub const STATIC_NS_CLASS_B: ClassN<2> = ClassN {
class_name: "B",
names: &["0"],
};
}
mod dynamic {
use super::*;
pub struct TestDynamic<const N: u8>(pub String, &'static str);
impl<const N: u8> namespace::Dynamic for TestDynamic<N> {
fn namespace_name(&self) -> &str {
self.1
}
fn symbol_name(&self) -> &str {
&self.0
}
fn dyn_clone(&self) -> Box<dyn namespace::Dynamic> {
Box::new(TestDynamic::<N>(self.0.clone(), self.1))
}
fn dyn_eq(&self, rhs: &dyn namespace::Dynamic) -> bool {
self.0 == rhs.downcast_ref::<TestDynamic<N>>().unwrap().0
}
fn dyn_cmp(&self, rhs: &dyn namespace::Dynamic) -> std::cmp::Ordering {
self.0.cmp(&rhs.downcast_ref::<TestDynamic<N>>().unwrap().0)
}
fn dyn_hash(&self, state: &mut dyn std::hash::Hasher) {
state.write(self.0.as_bytes());
state.write_u8(0xff)
}
}
pub fn sym0(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<0>(str.into(), "dyn0")))
}
pub fn sym1(str: &str) -> Symbol {
Symbol::Dynamic(Box::new(TestDynamic::<1>(str.into(), "dyn1")))
}
}
const STATIC_A_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 0);
const STATIC_A_1: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_A, 1);
const STATIC_B_0: Symbol = Symbol::Static(&_static::STATIC_NS_CLASS_B, 0);
struct TestState {
random_state: std::collections::hash_map::RandomState,
}
impl TestState {
pub fn new() -> Self {
Self {
random_state: std::collections::hash_map::RandomState::new(),
}
}
fn assert_hash_match(&self, a: &Symbol, b: &Symbol, should_equal: bool) {
let mut hasher_a = self.random_state.build_hasher();
let mut hasher_b = self.random_state.build_hasher();
a.hash(&mut hasher_a);
b.hash(&mut hasher_b);
if should_equal {
assert_eq!(hasher_a.finish(), hasher_b.finish())
} else {
assert_ne!(hasher_a.finish(), hasher_b.finish())
}
}
fn assert_full_eq(&self, a: &Symbol, b: &Symbol) {
assert_eq!(a, b);
assert_eq!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, true)
}
fn assert_full_ne(&self, a: &Symbol, b: &Symbol) {
assert_ne!(a, b);
assert_ne!(a.cmp(b), Ordering::Equal);
self.assert_hash_match(a, b, false)
}
}
#[test]
fn test_symbol_size_of() {
let u_size = std::mem::size_of::<usize>();
// This size_of Symbol is computed like this:
// It's at least two words, because of `dyn`.
// it's more than two words because it needs to encode the A/B enum value.
// on 64-bit arch it should be 3 words, because it contains an `u32` too,
// and that should be encoded within the same machine word as the enum discriminant..
// I think...
let expected_word_size = match u_size {
8 => 3 * u_size,
// 4 => 4, Perhaps?
_ => panic!("untested word size"),
};
assert_eq!(std::mem::size_of::<Symbol>(), expected_word_size);
}
#[test]
fn test_debug() {
assert_eq!(format!("{:?}", STATIC_A_0), "A::0");
assert_eq!(format!("{:?}", STATIC_A_1), "A::1");
assert_eq!(format!("{:?}", STATIC_B_0), "B::0");
assert_eq!(format!("{:?}", dynamic::sym0("foo")), "dyn0::foo");
assert_eq!(format!("{:?}", dynamic::sym1("bar")), "dyn1::bar");
}
#[test]
fn | test_equality | identifier_name | |
xapian_backend.py | start_time = self.folders_with_date[0][0]
end_time = start_time + datetime.timedelta(days=50)
weibos = self.db.statuses.find({
self.schema['posted_at_key']: {
'$gte': calendar.timegm(start_time.timetuple()),
'$lt': calendar.timegm(end_time.timetuple())
}
}, timeout=False)
print 'prod mode: loaded weibos from mongod'
elif debug:
with open("../test/sample_tweets.js") as f:
weibos = json.loads(f.readline())
print 'debug mode: loaded weibos from file'
count = 0
try:
for weibo in weibos:
count += 1
posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
if not debug and start_time:
folder = self.folders_with_date[0][1]
elif debug:
for i in xrange(len(self.folders_with_date) - 1):
if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i][1]
break
else:
if posted_at >= self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i + 1][1]
self.update(folder, weibo)
if count % PROCESS_IDX_SIZE == 0:
print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
except Exception:
raise
finally:
for database in self.databases.itervalues():
database.close()
for _, folder in self.folders_with_date:
print 'index size', folder, self.document_count(folder)
def update(self, folder, weibo):
document = xapian.Document()
document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
for field in self.schema['idx_fields']:
self.index_field(field, document, weibo, SCHEMA_VERSION)
document.set_data(pickle.dumps(
weibo, pickle.HIGHEST_PROTOCOL
))
document.add_term(document_id)
self.get_database(folder).replace_document(document_id, document)
def index_field(self, field, document, weibo, schema_version):
prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
if schema_version == 1:
if field['field_name'] in ['uid', 'name']:
term = _marshal_term(weibo[field['field_name']])
document.add_term(prefix + term)
elif field['field_name'] == 'ts':
document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
elif field['field_name'] == 'text':
tokens = [token[0] for token
in self.s.participle(weibo[field['field_name']].encode('utf-8'))
if len(token[0]) > 1]
for token in tokens:
if len(token) <= 10:
document.add_term(prefix + token)
document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
def create(dbpath):
return xapian.Database(dbpath)
def merge(db1, db2):
db1.add_database(db2)
return db1
self.database = reduce(merge,
map(create,
[path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
self.schema = getattr(Schema, 'v%s' % schema_version, None)
def parse_query(self, query_dict):
"""
Given a `query_dict`, will attempt to return a xapian.Query
Required arguments:
``query_dict`` -- A query dict similar to MongoDB style to parse
Returns a xapian.Query
Operator Reference:
Comparison:
equal, key = value, { key:value }
$lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
Logical:
$and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
... , { <expressionN> }] }
$or, perform logical OR operation in expressions like the $and operation
$xor, perform logical XOR operation in expressions like the $and operation
$not, perform logical NOT operation in experssions, which get the conjunction of both negative
experssions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
PS: if not any operation is specified, the logical AND operation is the default operation
(An implicit AND operation is performed when specifying a comma separated list of expressions).
See more query examples in test files.
"""
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
def build_query_tree(self, query_dict):
"""将字典转成语法树"""
ops = ['$not']
bi_ops = ['$or', '$and', '$xor']
def op(a, b, operation):
if operation == '$and':
return a & b
elif operation == '$or':
return a | b
elif operation == '$xor':
return a ^ b
else:
raise OperationError('Operation %s cannot be processed.' % operation)
def grammar_tree(query_dict):
total_query = Q()
for k in query_dict.keys():
if k in bi_ops:
#deal with expression without operator
bi_query = reduce(lambda a, b: op(a, b, k),
map(lambda expr: Q(**expr),
filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
#deal with nested expression
nested_query = reduce(lambda a, b: op(a, b, k),
map(lambda query_dict: grammar_tree(query_dict),
filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
if nested_query:
total_query &= op(bi_query, nested_query, k)
else:
total_query &= bi_query
elif k in ops:
if k == '$not':
not_dict = {}
#nested_query_dict = {}
for not_k in query_dict[k]:
if not_k not in ops + bi_ops:
not_dict[not_k] = query_dict[k][not_k]
else:
pass
#nested query in a $not statement is not implemented
#nested_query_dict.update({not_k: query_dict[k][not_k]})
not_query = notQ(**not_dict)
total_query &= not_query
else:
total_query &= Q(**{k: query_dict[k]})
return total_query
total_query = grammar_tree(query_dict)
return total_query
def search(self, query=None, sort_by=None, start_offset=0,
max_offset=1000, fields=None, **kwargs):
query = self.parse_query(query)
if xapian.Query.empty(query):
return {
'results': [],
'hits': 0,
}
database = self.database
enquire = xapian.Enquire(database)
enquire.set_query(query)
if sort_by:
sorter = xapian.MultiValueSorter()
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False # Reverse is inverted in Xapian -- http://trac.xapian.org/ticket/311
sorter.add(self._value_column(sort_field), reverse)
enquire.set_sort_by_key_then_relevance(sorter, True)
results = []
if not max_offset:
max_offset = database.get_doccount() - start_offset
matches = self._get_enquire_mset(database, enquire, start_offset, max_offset)
for match in matches:
weibo = pickle.loads(self._get_document_data(database, match.document))
item = None
if fields is not None: # 如果fields为[], 这情况下,不返回任何一项
item = {}
for field in fields:
item[field] = weibo[field]
else:
item = weibo
results.append(item)
return {
'results': results,
'hits': self._get_hit_count(database, enquire)
}
def _get_enquire_mset(self, database, enquire, start_offset, max_offset):
"""
A safer version of Xapian.enquire.get_mset
Simply wraps the Xapian version and catches any `Xapian.DatabaseModifiedError`,
attempting a `database.reopen` as needed.
Required arguments:
` | n self.databases[folder]
#@profile
def load_and_index_weibos(self, start_time=None):
if not debug and start_time:
| identifier_body | |
xapian_backend.py | )):
value = xapian.sortable_serialise(value)
return value
def _marshal_term(term):
"""
Private utility method that converts Python terms to a string for Xapian terms.
"""
if isinstance(term, int):
term = str(term)
return term
def _database(folder, writable=False):
"""
Private method that returns a xapian.Database for use.
Optional arguments:
``writable`` -- Open the database in read/write mode (default=False)
Returns an instance of a xapian.Database or xapian.WritableDatabase
"""
if writable:
if debug:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OVERWRITE)
else:
database = xapian.WritableDatabase(folder, xapian.DB_CREATE_OR_OPEN)
else:
try:
database = xapian.Database(folder)
except xapian.DatabaseOpeningError:
raise InvalidIndexError(u'Unable to open index at %s' % folder)
return database
class InvalidIndexError(Exception):
"""Raised when an index can not be opened."""
pass
class InvalidQueryError(Exception):
"""Raised when a query is illegal."""
pass
class OperationError(Exception):
"""Raised when queries cannot be operated."""
pass
class QNodeVisitor(object):
"""
Base visitor class for visiting Q-object nodes in a query tree.
"""
def visit_combination(self, combination):
"""
Called by QCombination objects.
"""
return combination
def visit_query(self, query):
"""
Called by (New)Q objects.
"""
return query
def visit_not_query(self, query):
"""
Called by (New)NOT Q objects.
"""
return query
class SimplificationVisitor(QNodeVisitor):
"""
Simplifies query trees by combinging unnecessary 'and' connection nodes
into a single Q-object.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
"""
The simplification only applies to 'simple' queries
如果最外层的操作符是and,然后里面的每个元素都是一个独自的Q且不是not Q
将所有的Q的query抽出来,到一个query里面来
"""
if all(isinstance(node, Q) and not isinstance(node, notQ)
for node in combination.children):
queries = [node.query for node in combination.children]
return Q(**self._query_conjunction(queries))
return combination
def _query_conjunction(self, queries):
"""
Merges query dicts - effectively &ing them together.
"""
query_ops = set()
combined_query = {}
for query in queries:
ops = set(query.keys())
# Make sure that the same operation isn't applied more than once
# to a single field
intersection = ops & query_ops
if intersection:
msg = 'Duplicate query conditions: '
raise InvalidQueryError(msg + ', '.join(intersection))
query_ops.update(ops)
combined_query.update(copy.deepcopy(query))
return combined_query
class QueryTreeTransformerVisitor(QNodeVisitor):
"""
Transforms the query tree in to a form that may be more effective used with Xapian.
"""
def visit_combination(self, combination):
if combination.operation == combination.AND:
# Move the ORs up the tree to one 'master' $or.
# Firstly, we must find all the necessary parts (part
# of an AND combination or just standard Q object), and store them
# separately from the OR parts.
or_groups = []
and_parts = []
for node in combination.children:
if isinstance(node, QCombination):
if node.operation == node.OR:
# Any of the children in an $or component may cause
# the query to succeed
or_groups.append(node.children)
elif node.operation == node.AND:
and_parts.append(node)
elif isinstance(node, Q):
and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses in to one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
class QueryCompilerVisitor(QNodeVisitor):
"""
Compiles the nodes in a query tree to a Xapian-compatible query.
"""
def __init__(self, schema, database):
self.schema = schema
self.database = database
def visit_combination(self, combination):
if combination.operation == combination.OR:
return xapian.Query(xapian.Query.OP_OR, combination.children)
elif combination.operation == combination.AND:
return xapian.Query(xapian.Query.OP_AND, combination.children)
elif combination.operation == combination.AND_NOT:
return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
elif combination.operation == combination.XOR:
return xapian.Query(xapian.Query.OP_XOR, combination.children)
return combination
def visit_not_query(self, query):
new_query = self.visit_query(query, n=True)
#NOT set is the intersection of universal set AND NOT set
new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
return new_query
def visit_query(self, query, n=False):
query_dict = query.query
qp = xapian.QueryParser()
qp.set_database(self.database)
field_prefix = {}
field_type = {}
field_col = {}
for field_dict in self.schema['idx_fields']:
fname = field_dict['field_name']
field_col[fname] = field_dict['column']
field_type[fname] = field_dict['type']
field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
pre_query = None
new_query = None
for field in query_dict:
if field in field_prefix:
prefix = field_prefix[field]
col = field_col[field]
value = query_dict[field]
if isinstance(value, dict):
ftype = field_type[field]
if ftype == 'int' or ftype == 'long':
begin = value.get('$gt', 0)
end = value.get('$lt', sys.maxint)
qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
value = ['%s%s' % (prefix, v) for v in value]
#De Morgan's laws, if we want the intersection of negation sets,
#Firstly, we obtain the disjunction of this sets, then get negation of them
# (AND_NOT [U, (OR, [a, b, c])])
# NOT (a OR B OR C)
# NOT a AND not b AND not C
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, value)
else:
new_query = xapian.Query(xapian.Query.OP_OR, value)
else:
new_query = xapian.Query('%s%s' % (prefix, value))
if pre_query:
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
else:
# and_not , [U, a or b])
# not a and not b
new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
pre_query = new_query
return new_query
class QNode(object):
"""
Base class for nodes in query trees.
"""
AND = 0
AND_NOT = 1
OR = 2
XOR = 3
NOT = 4
def to_query(self, schema, database):
'''
The query optimization is a bit harder, so we just leave the optimization of query
to user's own judgement and come back to it in the future.
'''
#query = self.accept(SimplificationVisitor())
#query = query.accept(QueryTreeTransformerVisitor())
query = self.accept(QueryCompilerVisitor(schema, database))
return query
def accept(self, visitor):
"""在to_query里被调用,不同子类有不同实现"""
raise NotImplementedError
def _combine(self, other, operation):
"""
Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty'):
return self
| if self.empty:
return other
| random_line_split | |
xapian_backend.py | (self):
self.emotion_words = [line.strip('\r\n') for line in file(EXTRA_EMOTIONWORD_PATH)]
def load_scws(self):
s = scws.Scws()
s.set_charset(SCWS_ENCODING)
s.set_dict(CHS_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CHT_DICT_PATH, scws.XDICT_MEM)
s.add_dict(CUSTOM_DICT_PATH, scws.XDICT_TXT)
# 把停用词全部拆成单字,再过滤掉单字,以达到去除停用词的目的
s.add_dict(EXTRA_STOPWORD_PATH, scws.XDICT_TXT)
# 即基于表情表对表情进行分词,必要的时候在返回结果处或后剔除
s.add_dict(EXTRA_EMOTIONWORD_PATH, scws.XDICT_TXT)
s.set_rules(SCWS_RULES)
s.set_ignore(IGNORE_PUNCTUATION)
self.s = s
def load_mongod(self):
connection = pymongo.Connection()
db = connection.admin
db.authenticate('root', 'root')
db = connection.weibo
self.db = db
def get_database(self, folder):
if folder not in self.databases:
self.databases[folder] = _database(folder, writable=True)
return self.databases[folder]
#@profile
def load_and_index_weibos(self, start_time=None):
if not debug and start_time:
start_time = self.folders_with_date[0][0]
end_time = start_time + datetime.timedelta(days=50)
weibos = self.db.statuses.find({
self.schema['posted_at_key']: {
'$gte': calendar.timegm(start_time.timetuple()),
'$lt': calendar.timegm(end_time.timetuple())
}
}, timeout=False)
print 'prod mode: loaded weibos from mongod'
elif debug:
with open("../test/sample_tweets.js") as f:
weibos = json.loads(f.readline())
print 'debug mode: loaded weibos from file'
count = 0
try:
for weibo in weibos:
count += 1
posted_at = datetime.datetime.fromtimestamp(weibo[self.schema['posted_at_key']])
if not debug and start_time:
folder = self.folders_with_date[0][1]
elif debug:
for i in xrange(len(self.folders_with_date) - 1):
if self.folders_with_date[i][0] <= posted_at < self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i][1]
break
else:
if posted_at >= self.folders_with_date[i + 1][0]:
folder = self.folders_with_date[i + 1][1]
self.update(folder, weibo)
if count % PROCESS_IDX_SIZE == 0:
print '[%s] folder[%s] num indexed: %s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), folder, count)
except Exception:
raise
finally:
for database in self.databases.itervalues():
database.close()
for _, folder in self.folders_with_date:
print 'index size', folder, self.document_count(folder)
def update(self, folder, weibo):
document = xapian.Document()
document_id = DOCUMENT_ID_TERM_PREFIX + weibo[self.schema['obj_id']]
for field in self.schema['idx_fields']:
self.index_field(field, document, weibo, SCHEMA_VERSION)
document.set_data(pickle.dumps(
weibo, pickle.HIGHEST_PROTOCOL
))
document.add_term(document_id)
self.get_database(folder).replace_document(document_id, document)
def index_field(self, field, document, weibo, schema_version):
prefix = DOCUMENT_CUSTOM_TERM_PREFIX + field['field_name'].upper()
if schema_version == 1:
if field['field_name'] in ['uid', 'name']:
term = _marshal_term(weibo[field['field_name']])
document.add_term(prefix + term)
elif field['field_name'] == 'ts':
document.add_value(field['column'], _marshal_value(weibo[field['field_name']]))
elif field['field_name'] == 'text':
tokens = [token[0] for token
in self.s.participle(weibo[field['field_name']].encode('utf-8'))
if len(token[0]) > 1]
for token in tokens:
if len(token) <= 10:
document.add_term(prefix + token)
document.add_value(field['column'], weibo[field['field_name']])
class XapianSearch(object):
def __init__(self, path='../data/', name='statuses', schema_version=SCHEMA_VERSION):
def create(dbpath):
return xapian.Database(dbpath)
def merge(db1, db2):
db1.add_database(db2)
return db1
self.database = reduce(merge,
map(create,
[path + p for p in os.listdir(path) if p.startswith('_%s' % name)]))
self.schema = getattr(Schema, 'v%s' % schema_version, None)
def parse_query(self, query_dict):
"""
Given a `query_dict`, will attempt to return a xapian.Query
Required arguments:
``query_dict`` -- A query dict similar to MongoDB style to parse
Returns a xapian.Query
Operator Reference:
Comparison:
equal, key = value, { key:value }
$lt, $gt, the field less or more than the specified value, { field: { $lt: value, $gt: value } }
Logical:
$and, perform logical AND operation in expressions, { $and: [{ <expression1> } , { <expression2> },
... , { <expressionN> }] }
$or, perform logical OR operation in expressions like the $and operation
$xor, perform logical XOR operation in expressions like the $and operation
$not, perform logical NOT operation in experssions, which get the conjunction of both negative
experssions, { $not: { <expression1> }, { <expression2> }, ... { <expressionN> } }
PS: if not any operation is specified, the logical AND operation is the default operation
(An implicit AND operation is performed when specifying a comma separated list of expressions).
See more query examples in test files.
"""
if query_dict is None:
return xapian.Query('') # Match everything
elif query_dict == {}:
return xapian.Query() # Match nothing
query_tree = self.build_query_tree(query_dict)
return query_tree.to_query(self.schema, self.database)
def build_query_tree(self, query_dict):
"""将字典转成语法树"""
ops = ['$not']
bi_ops = ['$or', '$and', '$xor']
def op(a, b, operation):
if operation == '$and':
return a & b
elif operation == '$or':
return a | b
elif operation == '$xor':
return a ^ b
else:
raise OperationError('Operation %s cannot be processed.' % operation)
def grammar_tree(query_dict):
total_query = Q()
for k in query_dict.keys():
if k in bi_ops:
#deal with expression without operator
bi_query = reduce(lambda a, b: op(a, b, k),
map(lambda expr: Q(**expr),
filter(lambda expr: not (set(expr.keys()) & set(ops + bi_ops)), query_dict[k])), Q())
#deal with nested expression
nested_query = reduce(lambda a, b: op(a, b, k),
map(lambda query_dict: grammar_tree(query_dict),
filter(lambda expr: set(expr.keys()) & set(ops + bi_ops), query_dict[k])), Q())
if nested_query:
total_query &= op(bi_query, nested_query, k)
else:
total_query &= bi_query
elif k in ops:
if k == '$not':
not_dict = {}
#nested_query_dict = {}
for not_k in query_dict[k]:
if not_k not in ops + bi_ops:
not_dict[not_k] = query_dict[k][not_k]
else:
pass
#nested query in a $not statement is not implemented
#nested_query_dict.update({not_k: query_dict[k][not_k]})
not_query = notQ(**not_dict)
total_query &= not_query
else:
total_query &= Q(**{k: query_dict[k]})
return total_query
total_query = grammar_tree(query_dict)
return total_query
def search(self, query=None, sort_by=None, start_offset=0,
max_offset=1000, fields=None, **kwargs):
query = self.parse_query(query)
if xapian.Query.empty(query):
return {
'results': [],
'hits': 0,
}
database = self.database
enquire = xapian.Enquire(database)
enquire.set_query(query)
if sort_by:
sorter = xapian.MultiValueSorter()
for sort_field in sort_by:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip | load_extra_dic | identifier_name | |
xapian_backend.py | and_parts.append(node)
# Now we combine the parts into a usable query. AND together all of
# the necessary parts. Then for each $or part, create a new query
# that ANDs the necessary part with the $or part.
clauses = []
for or_group in product(*or_groups):
q_object = reduce(lambda a, b: a & b, and_parts, Q())
q_object = reduce(lambda a, b: a & b, or_group, q_object)
clauses.append(q_object)
# Finally, $or the generated clauses in to one query. Each of the
# clauses is sufficient for the query to succeed.
return reduce(lambda a, b: a | b, clauses, Q())
if combination.operation == combination.OR:
children = []
for node in combination.children:
if (isinstance(node, QCombination) and
node.operation == combination.OR):
children += node.children
else:
children.append(node)
combination.children = children
return combination
class QueryCompilerVisitor(QNodeVisitor):
"""
Compiles the nodes in a query tree to a Xapian-compatible query.
"""
def __init__(self, schema, database):
self.schema = schema
self.database = database
def visit_combination(self, combination):
if combination.operation == combination.OR:
return xapian.Query(xapian.Query.OP_OR, combination.children)
elif combination.operation == combination.AND:
return xapian.Query(xapian.Query.OP_AND, combination.children)
elif combination.operation == combination.AND_NOT:
return xapian.Query(xapian.Query.OP_AND_NOT, combination.children)
elif combination.operation == combination.XOR:
return xapian.Query(xapian.Query.OP_XOR, combination.children)
return combination
def visit_not_query(self, query):
new_query = self.visit_query(query, n=True)
#NOT set is the intersection of universal set AND NOT set
new_query = xapian.Query(xapian.Query.OP_AND_NOT, [xapian.Query(''), new_query])
return new_query
def visit_query(self, query, n=False):
query_dict = query.query
qp = xapian.QueryParser()
qp.set_database(self.database)
field_prefix = {}
field_type = {}
field_col = {}
for field_dict in self.schema['idx_fields']:
fname = field_dict['field_name']
field_col[fname] = field_dict['column']
field_type[fname] = field_dict['type']
field_prefix[fname] = DOCUMENT_CUSTOM_TERM_PREFIX + fname.upper()
pre_query = None
new_query = None
for field in query_dict:
if field in field_prefix:
prefix = field_prefix[field]
col = field_col[field]
value = query_dict[field]
if isinstance(value, dict):
ftype = field_type[field]
if ftype == 'int' or ftype == 'long':
begin = value.get('$gt', 0)
end = value.get('$lt', sys.maxint)
qp.add_valuerangeprocessor(xapian.NumberValueRangeProcessor(col, '%s' % prefix))
new_query = qp.parse_query('%s%s..%s' % (prefix, begin, end))
elif not hasattr(value, 'strip') and hasattr(value, '__getitem__') or hasattr(value, '__iter__'):
value = ['%s%s' % (prefix, v) for v in value]
#De Morgan's laws, if we want the intersection of negation sets,
#Firstly, we obtain the disjunction of this sets, then get negation of them
# (AND_NOT [U, (OR, [a, b, c])])
# NOT (a OR B OR C)
# NOT a AND not b AND not C
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, value)
else:
new_query = xapian.Query(xapian.Query.OP_OR, value)
else:
new_query = xapian.Query('%s%s' % (prefix, value))
if pre_query:
if not n:
new_query = xapian.Query(xapian.Query.OP_AND, [pre_query, new_query])
else:
# and_not , [U, a or b])
# not a and not b
new_query = xapian.Query(xapian.Query.OP_OR, [pre_query, new_query])
pre_query = new_query
return new_query
class QNode(object):
"""
Base class for nodes in query trees.
"""
AND = 0
AND_NOT = 1
OR = 2
XOR = 3
NOT = 4
def to_query(self, schema, database):
'''
The query optimization is a bit harder, so we just leave the optimization of query
to user's own judgement and come back to it in the future.
'''
#query = self.accept(SimplificationVisitor())
#query = query.accept(QueryTreeTransformerVisitor())
query = self.accept(QueryCompilerVisitor(schema, database))
return query
def accept(self, visitor):
"""在to_query里被调用,不同子类有不同实现"""
raise NotImplementedError
def _combine(self, other, operation):
"""
Combine this node with another node into a QCombination object.
"""
if getattr(other, 'empty'):
return self
if self.empty:
return other
return QCombination(operation, [self, other])
@property
def empty(self):
return False
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
class QCombination(QNode):
"""
Represents the combination of several conditions by a given logical
operator.
"""
def __init__(self, operation, children):
self.operation = operation
self.children = []
for node in children:
# If the child is a combination of the same type, we can merge its
# children directly into this combinations children
if isinstance(node, QCombination) and node.operation == operation:
self.children += node.children
else:
self.children.append(node)
def accept(self, visitor):
for i in range(len(self.children)):
if isinstance(self.children[i], QNode):
self.children[i] = self.children[i].accept(visitor)
return visitor.visit_combination(self)
@property
def empty(self):
return not self.children
def __repr__(self):
return '%s: (%s, [%s])' % \
(type(self), OPERATIONINT2STR[str(self.operation)], ', '.join([str(x) for x in self.children]))
class Q(QNode):
"""
A simple query object, used in a query tree to build up more complex
query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_query(self)
@property
def empty(self):
return not self.query
def __repr__(self):
return '%s: %s' % (type(self), self.query)
class notQ(Q):
"""
A query object based on simple query object, used in a query tree to
build up NOT query structures.
"""
def __init__(self, **query):
self.query = query
def accept(self, visitor):
return visitor.visit_not_query(self)
class Schema:
v1 = {
'obj_id': '_id',
'posted_at_key': 'ts',
'idx_fields': [
{'field_name': 'uid', 'column': 0, 'type': 'long'},
{'field_name': 'name', 'column': 1, 'type': 'text'},
{'field_name': 'text', 'column': 2, 'type': 'text'},
{'field_name': 'ts', 'column': 3, 'type': 'long'}
],
}
if __name__ == "__main__":
"""
cd to test/ folder
then run 'py (-m memory_profiler) ../xapian_weibo/xapian_backend.py -d hehe'
http://pypi.python.org/pypi/memory_profiler
"""
parser = ArgumentParser()
parser.add_argument('-d', '--debug', action='store_true', help='DEBUG')
parser.a | dd_argument('-p', '--print_folders', action='store_true', help='PRINT FOLDER THEN EXIT')
parser.add_argument('-s', '--start_time', nargs=1, help='DATETIME')
parser.add_argument('dbpath', help='PATH_TO_DATABASE')
args = parser.parse_args(sys.argv[1:])
debug = args.debug
dbpath = args.dbpath
if args.print_folders:
debug = True
xapian_indexer = XapianIndex(dbpath, SCHEMA_VERSION)
xapian_indexer.generate()
for _, folder in xapian_indexer.folders_with_date:
print folder
sys.exit(0)
start_time = args.start_time[0] if args.start_time else None
if debug:
if start_time:
print 'debug mode(warning): start_time will not be used' | conditional_block | |
y.go | , -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int | /* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yy | {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack: | identifier_body |
y.go | , -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27, |
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yy | }
var yyDef = []int{ | random_line_split |
y.go | , -1000, -1000, 56, 2, 1, -8,
-10, -1000, -1000, -10, -10, -10, 114, -1000, -10, -10,
-9, -14, -16, -2, 136, -4, -13, -1000, -10, -10,
-10, -10, -10, -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func | (lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yy | yylex1 | identifier_name |
y.go | , -10, -10, -10, -10, -10, 104, -1000,
-10, -10, -10, 16, -10, 15, 14, 48, 48, -1000,
-1000, 84, 84, 84, 84, 84, 84, -1000, -23, -25,
70, -1000, 136, -1000, -1000, -1000, -1000, -10, 45, -10,
80, -1000,
}
var yyPgo = []int{
0, 65, 46, 0, 44, 6, 42,
}
var yyR1 = []int{
0, 6, 1, 1, 1, 1, 2, 2, 2, 2,
4, 4, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 5, 5,
}
var yyR2 = []int{
0, 1, 0, 2, 2, 2, 4, 5, 5, 5,
1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 2, 4, 4, 8, 1, 3,
}
var yyChk = []int{
-1000, -6, -1, -2, 25, 24, 17, 20, 21, 22,
-4, 15, 16, 26, 26, 26, -3, 17, 26, 11,
18, 19, 23, -5, -3, -5, -5, 25, 10, 11,
12, 13, 4, 5, 6, 7, 8, 9, -3, -3,
26, 26, 26, 27, 28, 27, 27, -3, -3, -3,
-3, -3, -3, -3, -3, -3, -3, 27, -5, -5,
-3, 25, -3, 25, 25, 27, 27, 28, -3, 28,
-3, 27,
}
var yyDef = []int{
2, -2, 1, 3, 4, 5, 0, 0, 0, 0,
0, 10, 11, 0, 0, 0, 0, 12, 0, 0,
0, 0, 0, 0, 28, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
0, 0, 0, 0, 0, 0, 0, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 13, 0, 0,
0, 7, 29, 8, 9, 25, 26, 0, 0, 0,
0, 27,
}
var yyTok1 = []int{
1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
25, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
26, 27, 12, 10, 28, 11, 3, 13, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 15, 6,
}
var yyTok2 = []int{
2, 3, 5, 7, 8, 9, 14, 16, 17, 18,
19, 20, 21, 22, 23, 24,
}
var yyTok3 = []int{
0,
}
//line yaccpar:1
/* parser for yacc output */
var yyDebug = 0
type yyLexer interface {
Lex(lval *yySymType) int
Error(s string)
}
const yyFlag = -1000
func yyTokname(c int) string {
// 4 is TOKSTART above
if c >= 4 && c-4 < len(yyToknames) {
if yyToknames[c-4] != "" {
return yyToknames[c-4]
}
}
return __yyfmt__.Sprintf("tok-%v", c)
}
func yyStatname(s int) string {
if s >= 0 && s < len(yyStatenames) {
if yyStatenames[s] != "" {
return yyStatenames[s]
}
}
return __yyfmt__.Sprintf("state-%v", s)
}
func yylex1(lex yyLexer, lval *yySymType) int {
c := 0
char := lex.Lex(lval)
if char <= 0 {
c = yyTok1[0]
goto out
}
if char < len(yyTok1) {
c = yyTok1[char]
goto out
}
if char >= yyPrivate {
if char < yyPrivate+len(yyTok2) {
c = yyTok2[char-yyPrivate]
goto out
}
}
for i := 0; i < len(yyTok3); i += 2 {
c = yyTok3[i+0]
if c == char {
c = yyTok3[i+1]
goto out
}
}
out:
if c == 0 {
c = yyTok2[1] /* unknown char */
}
if yyDebug >= 3 {
__yyfmt__.Printf("lex %U %s\n", uint(char), yyTokname(c))
}
return c
}
func yyParse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yyS := make([]yySymType, yyMaxDepth)
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yychar := -1
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yychar), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yychar < 0 {
yychar = yylex1(yylex, &yylval)
}
yyn += yychar
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yychar { /* valid shift */
yychar = -1
yyVAL = yylval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yychar < 0 | {
yychar = yylex1(yylex, &yylval)
} | conditional_block | |
mod.rs | + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
maybe_delivery_result = deliveries.next() => {
if let Some(delivery_result) = maybe_delivery_result {
let event_tx = event_tx.clone();
tokio::spawn(self.handle_delivery(delivery_result, event_tx));
}
},
_ = signals.next() => {
warn!("Ope! Hitting Ctrl+C again will terminate all running tasks!");
info!("Warm shutdown...");
break;
},
maybe_event = event_rx.next() => {
if let Some(event) = maybe_event {
debug!("Received task event {:?}", event);
match event.status { | TaskStatus::Pending => pending_tasks += 1,
TaskStatus::Finished => pending_tasks -= 1,
};
} | random_line_split | |
mod.rs | >(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() |
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! {
| {
self.broker.decrease_prefetch_count().await?;
} | conditional_block |
mod.rs | <T: Task>(&self, task: &T) -> Self {
Self {
timeout: task.timeout().or(self.timeout),
max_retries: task.max_retries().or(self.max_retries),
min_retry_delay: task.min_retry_delay().unwrap_or(self.min_retry_delay),
max_retry_delay: task.max_retry_delay().unwrap_or(self.max_retry_delay),
}
}
}
#[derive(Clone, Debug)]
enum TaskStatus {
Pending,
Finished,
}
#[derive(Clone, Debug)]
struct TaskEvent {
status: TaskStatus,
}
impl TaskEvent {
fn new(status: TaskStatus) -> Self {
Self { status }
}
}
struct Config<B>
where
B: Broker + 'static,
{
name: String,
broker: B,
default_queue_name: String,
task_options: TaskOptions,
}
/// Used to create a `Celery` app with a custom configuration.
pub struct CeleryBuilder<B>
where
B: Broker + 'static,
{
config: Config<B>,
}
impl<B> CeleryBuilder<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
fn new(name: &str, broker: B) -> Self {
Self {
config: Config {
name: name.into(),
broker,
default_queue_name: "celery".into(),
task_options: TaskOptions {
timeout: None,
max_retries: None,
min_retry_delay: 0,
max_retry_delay: 3600,
},
},
}
}
/// Set the name of the default queue.
pub fn default_queue_name(mut self, queue_name: &str) -> Self {
self.config.default_queue_name = queue_name.into();
self
}
/// Set a default timeout for tasks.
pub fn task_timeout(mut self, task_timeout: usize) -> Self {
self.config.task_options.timeout = Some(task_timeout);
self
}
/// Set a default maximum number of retries for tasks.
pub fn task_max_retries(mut self, task_max_retries: usize) -> Self {
self.config.task_options.max_retries = Some(task_max_retries);
self
}
/// Set a default minimum retry delay for tasks.
pub fn task_min_retry_delay(mut self, task_min_retry_delay: usize) -> Self {
self.config.task_options.min_retry_delay = task_min_retry_delay;
self
}
/// Set a default maximum retry delay for tasks.
pub fn task_max_retry_delay(mut self, task_max_retry_delay: usize) -> Self {
self.config.task_options.max_retry_delay = task_max_retry_delay;
self
}
/// Construct a `Celery` app with the current configuration .
pub fn build(self) -> Celery<B> {
Celery {
name: self.config.name,
broker: self.config.broker,
default_queue_name: self.config.default_queue_name,
task_trace_builders: RwLock::new(HashMap::new()),
task_options: self.config.task_options,
}
}
}
/// A `Celery` app is used to produce or consume tasks asyncronously.
pub struct Celery<B: Broker> {
/// An arbitrary, human-readable name for the app.
pub name: String,
/// The app's broker.
pub broker: B,
/// The default queue to send and receive from.
pub default_queue_name: String,
/// Mapping of task name to task tracer factory. Used to create a task tracer
/// from an incoming message.
task_trace_builders: RwLock<HashMap<String, TraceBuilder>>,
/// Default task options.
task_options: TaskOptions,
}
impl<B> Celery<B>
where
B: Broker + 'static,
{
/// Get a `CeleryBuilder` for creating a `Celery` app with a custom configuration.
pub fn builder(name: &str, broker: B) -> CeleryBuilder<B> {
CeleryBuilder::new(name, broker)
}
/// Create a new `Celery` app with the given name and broker.
pub fn new(name: &str, broker: B) -> Self {
Self::builder(name, broker).build()
}
/// Send a task to a remote worker.
pub async fn send_task<T: Task>(&self, task: T, queue: &str) -> Result<(), Error> {
let body = MessageBody::new(task);
let data = serde_json::to_vec(&body)?;
let message = Message::builder(T::NAME, data).build();
debug!("Sending message {:?}", message);
self.broker.send(&message, queue).await
}
/// Register a task.
pub fn register_task<T: Task + 'static>(&self) -> Result<(), Error> {
let mut task_trace_builders = self
.task_trace_builders
.write()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if task_trace_builders.contains_key(T::NAME) {
Err(ErrorKind::TaskAlreadyExists(T::NAME.into()).into())
} else {
task_trace_builders.insert(T::NAME.into(), Box::new(build_tracer::<T>));
info!("Registered task {}", T::NAME);
Ok(())
}
}
fn get_task_tracer(
&self,
message: Message,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<Box<dyn TracerTrait>, Error> {
let task_trace_builders = self
.task_trace_builders
.read()
.map_err(|_| Error::from(ErrorKind::SyncError))?;
if let Some(build_tracer) = task_trace_builders.get(&message.headers.task) {
Ok(build_tracer(message, self.task_options, event_tx)?)
} else {
Err(ErrorKind::UnregisteredTaskError(message.headers.task).into())
}
}
/// Trie converting a delivery into a `Message`, executing the corresponding task,
/// and communicating with the broker.
async fn try_handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) -> Result<(), Error> {
let delivery = delivery_result.map_err(|e| e.into())?;
debug!("Received delivery: {:?}", delivery);
let message = match delivery.try_into_message() {
Ok(message) => message,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
let mut tracer = match self.get_task_tracer(message, event_tx) {
Ok(tracer) => tracer,
Err(e) => {
self.broker.ack(delivery).await?;
return Err(e);
}
};
if tracer.is_delayed() {
// Task has an ETA, so we need to increment the prefetch count.
if let Err(e) = self.broker.increase_prefetch_count().await {
// If for some reason this operation fails, we should stop tracing
// this task and send it back to the broker to retry.
// Otherwise we could reach the prefetch_count and end up blocking
// other deliveries if there are a high number of messages with a
// future ETA.
self.broker.retry(delivery, None).await?;
return Err(e);
};
}
match tracer.trace().await {
Ok(_) => {
self.broker.ack(delivery).await?;
}
Err(e) => match e.kind() {
ErrorKind::Retry => {
let retry_eta = tracer.retry_eta();
self.broker.retry(delivery, retry_eta).await?
}
_ => self.broker.ack(delivery).await?,
},
};
if tracer.is_delayed() {
self.broker.decrease_prefetch_count().await?;
}
Ok(())
}
/// Wraps `try_handle_delivery` to catch any and all errors that might occur.
async fn handle_delivery(
&self,
delivery_result: Result<B::Delivery, B::DeliveryError>,
event_tx: UnboundedSender<TaskEvent>,
) {
if let Err(e) = self.try_handle_delivery(delivery_result, event_tx).await {
error!("{}", e);
}
}
/// Consume tasks from a queue.
pub async fn consume(&'static self, queue: &str) -> Result<(), Error> {
// Stream of deliveries from the queue.
let mut deliveries = Box::pin(self.broker.consume(queue).await?.fuse());
// Stream of OS signals.
let mut signals = signal(SignalKind::interrupt())?.fuse();
// A sender and receiver for task related events.
// NOTE: we can use an unbounded channel since we already have backpressure
// from the `prefetch_count` setting.
let (event_tx, event_rx) = mpsc::unbounded_channel::<TaskEvent>();
let mut event_rx = event_rx.fuse();
let mut pending_tasks = 0;
// This is the main loop where we receive deliveries and pass them off
// to be handled by spawning `self.handle_delivery`.
// At the same time we are also listening for a SIGINT (Ctrl+C) interruption.
// If that occurs we break from this loop and move to the warm shutdown loop
// if there are still any pending tasks (tasks being executed, not including
// tasks being delayed due to a future ETA).
loop {
select! | overrides | identifier_name | |
ddpg.py | cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class | (object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient})
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self | ActorNetwork | identifier_name |
ddpg.py | ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
|
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self | self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient}) | identifier_body |
ddpg.py | ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
| epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient = tf.placeholder(tf.float32, [None] + self.a_dim)
self.actor_gradients = tf.gradients(self.actor_out, self.actor_weights, -self.action_gradient)
self.optimize = tf.train.AdamOptimizer(self.learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor_weights))
def create_actor_network(self, scope_name):
with tf.variable_scope(scope_name):
X = tf.placeholder(tf.float32, shape = [None] + self.s_dim)
I = tf.to_float(X) / 255.0
with tf.variable_scope('conv1'):
H1 = conv2d(I, [8, 8, 3, 32], [32])
with tf.variable_scope('conv2'):
H2 = conv2d(H1, [4, 4, 32, 64], [64])
with tf.variable_scope('dense1'):
H3 = tf.reshape(H2, [-1, 2, 5 * 5 * 32])
O = tf.layers.dense(H3, self.a_dim[1], activation = tf.nn.softmax)
W = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = scope_name)
return I, W, O
def train(self, inputs, a_gradient):
self.sess.run(self.optimize, feed_dict = {self.actor_inputs_dirt: inputs,
self.action_gradient: a_gradient})
def predict(self, inputs):
return self.sess.run(self.actor_out, feed_dict = {
self.actor_inputs_dirt: inputs
})
def predict_target(self, inputs):
return self.sess.run(self.target_actor_out, feed_dict = {self.target_actor_inputs_dirt: inputs})
def update_target_network(self):
self.sess.run(self.update_target_network_params)
class CriticNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.critic_inputs_dirt, self | delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS): | conditional_block |
ddpg.py |
TAU = 0.01
cube_map_pose = OrderedDict([('cube_blue_1', [1.5, -0.3, 0.80]), ('cube_blue_2', [1.5, -0.05, 0.80]),
('cube_red_1', [1.5, 0.2, 0.80]), ('cube_red_2', [1.7, -0.15, 0.80]),
('cube_green_1', [1.65, 0.05, 0.80]), ('cube_green_2', [1.6, 0.32, 0.80])
])
#
# cube_map_pose = OrderedDict([('cube_blue_1', [1.4, -0.3, 0.80]), ('cube_blue_2', [1.4, -0.3, 1.5]),
# ('cube_red_1', [1.4, 0.2, 0.83]), ('cube_red_2', [1.6, 0.2, 0.83]),
# ('cube_green_1', [1.5, 0.0, 0.83]), ('cube_green_2', [1.5, 0.32, 0.83])
# ])
reoder_pose = OrderedDict([('cube_blue_1', [1.8, 0.0, 0.80]), ('cube_blue_2', [1.8, -0.15, 0.80]),
('cube_red_1', [1.8, 0.15, 0.80]), ('cube_red_2', [1.8, 0.3, 0.80]),
('cube_green_1', [1.8, -0.3, 0.80]), ('cube_green_2', [2.0, 0.32, 0.80])
])
SUM_DIR = '../SUM_DIR'
TRAIN_DIR = '../TRAIN_DIR'
class DDPG(object):
def __init__(self):
pass
@staticmethod
def execute(sess, actor, critic, train = True):
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(SUM_DIR)
summary_ops = tf.summary.merge_all()
if os.path.isfile(TRAIN_DIR + '/checkpoint'):
saver.restore(sess, TRAIN_DIR + '/model_pick.ckpt')
print('Restored')
from demo import Demo, GazeboClient
rospy.init_node('demo')
env = Demo()
if train:
actor.update_target_network()
critic.update_target_network()
replaybuffer = Memory(BUFFER_SIZE, RANDOM_SEED)
if os.path.isfile(PARAM_FILE):
data = Csv(PARAM_FILE).read()
epsilon = float(data['epsilon'][-1])
start = int(data['episode'][-1])
else:
epsilon = 0.8
start = 0
gazebo_client = GazeboClient(['ground_plane', 'fetch'])
gazebo_client.delete_gazebo_sdf_models()
load_models = env.gazebo_client.initial_load_gazebo_models()
gazebo_client.skip_models.append('building_0')
gazebo_client.skip_models.append('cafe_table_scaled_0')
for i in range(start, EPISODES):
#
# for i in range(EPISODES):
delete_models_name = [names for names in gazebo_client.get_model_states().name if
names.startswith('cube')]
delete_models_path = [names[:-2] for names in delete_models_name]
env.gazebo_client.delete_gazebo_sdf_models(delete_models_name)
env.gazebo_client.shuffle_models(delete_models_name, delete_models_path)
env.cubes_bottom_top()
s_t = env.reset()
total_action_attempt = 0
total_grasp_attempt = 0
total_place_attempt = 0
total_grasp_success = 0
total_place_success = 0
total_reward = 0.0
if s_t is not None:
for j in range(STEPS):
epsilon -= 0.7 / 1000.0
if np.random.random() > epsilon:
a_type = "Exploit"
a = actor.predict(s_t.reshape(-1, 84, 84, 3)).reshape(2, 3)
else:
a_type = "Explore"
a = np.random.random_sample([2, 3])
action = np.argmax(a, axis = 1)
print(action)
s_t1, r, terminal, update, grasp_attempt, \
grasp_success, place_attempt, place_success = env.step(list(action))
total_action_attempt += 1
try:
total_reward += r
except:
pass
total_grasp_attempt += int(grasp_attempt)
total_grasp_success += int(grasp_success)
total_place_attempt += int(place_attempt)
total_place_success += int(place_success)
# print('j: ', j,'Rewards: ', r)
if update:
replaybuffer.add(s_t.reshape([84, 84, 3]), a, r, terminal, s_t1.reshape([84, 84, 3]))
if replaybuffer.size() >= MINIBATCH_SIZE:
s_batch, a_batch, r_batch, t_batch, s2_batch = replaybuffer.sample_batch(MINIBATCH_SIZE)
target_q = critic.predict_target(np.array(s2_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]))
y_i = []
for k in range(MINIBATCH_SIZE):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + GAMMA_FACTOR * target_q[k])
critic.train(np.array(s_batch).reshape([-1, 84, 84, 3]),
np.array(a_batch).reshape([-1, 2, 3]),
np.reshape(y_i, (-1, 1)))
a_outs = actor.predict(np.array(s_batch).reshape([-1, 84, 84, 3]))
grads = critic.action_gradients(np.array(s_batch).reshape([-1, 84, 84, 3]), a_outs)
actor.train(np.array(s_batch).reshape([-1, 84, 84, 3]), grads[0])
actor.update_target_network()
critic.update_target_network()
if terminal:
break
s_t = s_t1
saver.save(sess, TRAIN_DIR + '/model_pick.ckpt')
Csv(PARAM_FILE).write(headers = ['epsilon', 'episode'], rows = [[epsilon], [i]], mode = 'w')
Csv(SUM_FILE).write(headers = ['episode', 'rewards', 'total_action_attempt', 'total_grasp_attempt',
'total_grasp_success', 'total_place_attempt', 'total_place_success'],
rows = [[int(i)], [float(total_reward)], [int(total_action_attempt)],
[int(total_grasp_attempt)], [int(total_grasp_success)],
[int(total_place_attempt)], [int(total_place_success)]],
mode = 'a')
try:
print ('Episode %d , Reward: %f , Epsilon: %f' % (i, total_reward, epsilon))
except:
pass
def conv2d(input, weight_shape, bias_shape):
stdev = weight_shape[0] * weight_shape[1] * weight_shape[2]
W = tf.get_variable("W", initializer = tf.truncated_normal(shape = weight_shape, stddev = 2 / np.sqrt(stdev)))
bias_init = tf.constant_initializer(value = 0)
b = tf.get_variable("b", bias_shape, initializer = bias_init)
conv_out = tf.nn.conv2d(input, W, strides = [1, 4, 4, 1], padding = 'VALID')
return tf.nn.relu(tf.nn.bias_add(conv_out, b))
class ActorNetwork(object):
def __init__(self, sess, state_dim, action_dim, learning_rate, tau):
self.sess = sess
self.s_dim = state_dim
self.a_dim = action_dim
self.learning_rate = learning_rate
self.tau = tau
self.actor_inputs_dirt, self.actor_weights, self.actor_out = self.create_actor_network('actor_network')
self.target_actor_inputs_dirt, self.target_actor_weights, self.target_actor_out = self.create_actor_network(
'actor_target')
self.update_target_network_params = \
[self.target_actor_weights[i].assign(tf.multiply(self.actor_weights[i], self.tau) +
tf.multiply(self.target_actor_weights[i], 1. - self.tau))
for i in range(len(self.target_actor_weights))]
self.action_gradient |
PARAM_FILE = '../param/training_parameters_2018_06_20.csv'
SUM_FILE = '../SUM_DIR/summary_2018_06_20.csv' | random_line_split | |
core.ts | -post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
| f (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis | conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
i | conditional_block |
core.ts | -post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cac | getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis | he && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function | identifier_body |
core.ts | js-post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id; | const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubShortLivedAccessToken(
privateKey: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis"
| const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise(); | random_line_split |
core.ts | -post-processor**@1234abcd**
// TODO: read this from OwlBot.yaml.
_CONTAINER: args.image,
_DEFAULT_BRANCH: args.defaultBranch ?? 'master',
},
},
});
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const buildId: string = (resp as any).metadata.build.id;
const detailsURL = detailsUrlFrom(buildId, project);
try {
// TODO(bcoe): work with fenster@ to figure out why awaiting a long
// running operation does not behave as expected:
// const [build] = await resp.promise();
const build = await waitForBuild(project, buildId, cb);
return {detailsURL, ...summarizeBuild(build)};
} catch (e) {
const err = e as Error;
logger.error(`triggerPostProcessBuild: ${err.message}`, {
stack: err.stack,
});
return buildFailureFrom(err, detailsURL);
}
}
function summarizeBuild(
build: google.devtools.cloudbuild.v1.IBuild
): BuildSummary {
if (!build.steps) throw Error('trigger contained no steps');
const successMessage = `successfully ran ${build.steps.length} steps 🎉!`;
let conclusion: 'success' | 'failure' = 'success';
let summary = successMessage;
let text = '';
let failures = 0;
for (const step of build.steps) {
if (step.status !== 'SUCCESS') {
conclusion = 'failure';
summary = `${++failures} steps failed 🙁`;
text += `❌ step ${step.name} failed with status ${step.status}\n`;
}
}
if (conclusion === 'success') {
text = `successfully ran ${build.steps.length} steps 🎉!`;
}
return {
conclusion,
summary,
text,
};
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function buildFailureFrom(error: any, detailsUrl: string): BuildResponse {
if (typeof error.name === 'string' && typeof error.message === 'string') {
return {
conclusion: 'failure',
summary: error.name,
text: error.message,
detailsURL: detailsUrl,
};
} else {
return {
conclusion: 'failure',
summary: 'unknown build failure',
text: 'unknown build failure',
detailsURL: detailsUrl,
};
}
}
// Helper to build a link to the Cloud Build job, which peers in DPE
// can use to view a given post processor run:
function detailsUrlFrom(buildID: string, project: string): string {
return `https://console.cloud.google.com/cloud-build/builds;region=global/${buildID}?project=${project}`;
}
class TimeoutError extends Error {
name = 'TimeoutError';
}
async function waitForBuild(
projectId: string,
id: string,
client: CloudBuildClient
): Promise<google.devtools.cloudbuild.v1.IBuild> {
// This loop is set to equal a total of 3 hours, which should
// match the timeout in cloud-build/update-pr.yaml's timeout
for (let i = 0; i < TOTAL_PINGS; i++) {
const [build] = await client.getBuild({projectId, id});
if (build.status !== 'WORKING' && build.status !== 'QUEUED') {
return build;
}
// Wait a few seconds before checking the build status again:
await new Promise(resolve => {
const delay = PING_DELAY;
setTimeout(() => {
return resolve(undefined);
}, delay);
});
}
throw new TimeoutError(`timed out waiting for build ${id}`);
}
export async function getHeadCommit(
owner: string,
repo: string,
pr: number,
octokit: Octokit
): Promise<Commit | undefined> {
let headCommit: Commit | undefined = undefined;
for await (const {data: commits} of octokit.paginate.iterator(
octokit.pulls.listCommits,
{
owner,
repo,
pull_number: pr,
per_page: 250,
}
)) {
headCommit = commits[commits.length - 1];
}
return headCommit;
}
export async function createCheck(
args: CheckArgs,
octokit?: Octokit,
logger: GCFLogger = defaultLogger
) {
if (!octokit) {
octokit = await core.getAuthenticatedOctokit({
privateKey: args.privateKey,
appId: args.appId,
installation: args.installation,
});
}
const [owner, repo] = args.repo.split('/');
const prName = `${args.repo} #${args.pr}`;
const headCommit = await getHeadCommit(owner, repo, Number(args.pr), octokit);
if (!headCommit) {
logger.warn(`No commit found for ${prName}.`);
return;
}
const response = await octokit.checks.create({
owner,
repo,
name: 'OwlBot Post Processor',
summary: args.summary,
head_sha: headCommit.sha as string,
conclusion: args.conclusion,
details_url: args.detailsURL,
output: {
title: args.title,
summary: args.summary,
text: args.text,
},
});
if (201 === response.status) {
logger.info(`Created check for ${prName}: ${response.data.html_url}`);
} else {
logger.error(
`Failed to create check for ${prName}. ` +
`Status: ${response.status}.\n` +
JSON.stringify(response)
);
}
}
export async function getGitHubSh | Key: string,
appId: number,
installation: number
): Promise<Token> {
const payload = {
// issued at time
// Note: upstream API seems to fail if decimals are included
// in unixtime, this is why parseInt is run:
iat: parseInt('' + Date.now() / 1000),
// JWT expiration time (10 minute maximum)
exp: parseInt('' + Date.now() / 1000 + 10 * 60),
// GitHub App's identifier
iss: appId,
};
const jwt = sign(payload, privateKey, {algorithm: 'RS256'});
const resp = await request<Token>({
url: getAccessTokenURL(installation),
method: 'POST',
headers: {
Authorization: `Bearer ${jwt}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (resp.status !== 201) {
throw Error(`unexpected response http = ${resp.status}`);
} else {
return resp.data;
}
}
export function getAccessTokenURL(installation: number) {
return `https://api.github.com/app/installations/${installation}/access_tokens`;
}
let cachedOctokit: Octokit;
export async function getAuthenticatedOctokit(
auth: string | AuthArgs,
cache = true
): Promise<Octokit> {
if (cache && cachedOctokit) return cachedOctokit;
let tokenString: string;
if (auth instanceof Object) {
const token = await getGitHubShortLivedAccessToken(
auth.privateKey,
auth.appId,
auth.installation
);
tokenString = token.token;
} else {
tokenString = auth;
}
const octokit = new Octokit({
auth: tokenString,
});
if (cache) cachedOctokit = octokit;
return octokit;
}
function getCloudBuildInstance() {
return new CloudBuildClient();
}
/*
* Load OwlBot lock file from .github/.OwlBot.lock.yaml.
* TODO(bcoe): abstract into common helper that supports .yml.
*
* @param {string} repoFull - repo in org/repo format.
* @param {number} pullNumber - pull request to base branch on.
* @param {Octokit} octokit - authenticated instance of Octokit.
*/
export async function fetchOwlBotLock(
repoFull: string,
pullNumber: number,
octokit: Octokit
): Promise<string | undefined> {
const [owner, repo] = repoFull.split('/');
const {data: prData} = await octokit.pulls.get({
owner,
repo,
pull_number: pullNumber,
});
if (!prData?.head?.repo?.full_name)
throw Error(`invalid response ${owner}/${repo} pr=${pullNumber}`);
const [prOwner, prRepo] = prData.head.repo.full_name.split('/');
const configString = await getFileContent(
prOwner,
prRepo,
OWL_BOT_LOCK_PATH,
prData.head.ref,
octokit
);
return configString;
}
export function parseOwlBotLock(configString: string): OwlBotLock {
const maybeOwlBotLock = load(configString);
if (maybeOwlBotLock === null || typeof maybeOwlBotLock !== 'object') {
throw new Error(`Lock file did not parse correctly. Expected an object.
Found ${maybeOwlBotLock}
while parsing
${configString}`);
}
return owlBotLockFrom(maybeOwlBotLock);
}
/**
* Octokit makes it surprisingly difficult to fetch the content for a file.
* This function makes it easier.
* @param owner the github org or user; ex: "googleapis | ortLivedAccessToken(
private | identifier_name |
main.py | print("GGF. stimmt etwas mit dem Pfad für die Ordnererstellung nicht.")
def write_into_csv_file(new_directory_name, output_csv_filename, all_texts, output_count_pos_tags):
#Wir versehen jedes Wort in jeder der .xml Dateien mit dem von Spacy erkannten Part-of-Speech Tag
#Die Satzenden finden wir mit "doc.sents" und geben wir an den entsprechenden Stellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_trigg | ist_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list | ers_for_os_links[potential_match[1]] = 1
double_l | conditional_block |
main.py | _ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
| plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit") | random_line_split | |
main.py | der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_counted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_motion_verb_into_csv(dict_with_motion_text):
# Hier schreiben wir die in anderen Funktionen gezählten Motion Verben in eine .csv-Datei
csv_name = "output_counted_motion_verbs.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict(Counter(dict_with_motion_text).most_common(5)):
thewriter.writerow([entry, dict_with_motion_text.get(entry)])
def create_graph_for_sentence_lengths(dict_with_sentence_lengths):
#Hier stellen wir die Verteilung Satzlänge graphisch dar und speichern das Bild
x = []
y = []
for entry in dict_with_sentence_lengths:
x.append(entry)
y.append(dict_with_sentence_lengths.get(entry))
plt.bar(x, y, align='center')
plt.title("Anzahl der Wörter pro Satz")
plt.xlabel("Satzlänge")
plt.ylabel("Häufigkeit")
plt.savefig('Verteilung_der_satzlaenge.png', dpi=300, bbox_inches='tight')
#plt.show()
def do_part_2_2_vorverarbeitung(all_texts):
#Eine kleine Sub-funktione, welche den Output-Ordner erstellt und
#dafür sorgt, dass das die PoS-Tags gezählt und geschrieben werden und dabei
#zählen wir die Satzlängen und geben diese in einem dict zurück
create_dir_for_saving_data("output_data")
dict_with_sentence_lengths = write_into_csv_file("output_data", "output_text_with_pos", all_texts, "output_count_pos_tags")
return dict_with_sentence_lengths
def main():
input_data = gi.get | _inp | identifier_name | |
main.py | ellen in der .csv Datei an
#Wir zählen parallel die Häufigkeit der verschiedenen PoS Tags und schreiben diese in ein anderes File.
#Wir zählen hier auch die Satzlängen
cur_abs_path = os.path.abspath("")
path = os.path.join(cur_abs_path, new_directory_name)
os.chdir(path)
full_output_csv_name = str(output_csv_filename) + ".csv"
with open(full_output_csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Wort:', 'PoS:', 'Satzende:'])
counter_for_pos = dict()
count_length_of_different_sentences = dict()
for text in all_texts:
thewriter.writerow(["Begin New File"])
doc = nlp(text)
for sentence in doc.sents:
#print("Ein Satz beginnt")
counter = 0
tokens_in_sentence = len(sentence)
words_in_sentence = 0
for token in sentence:
if token.pos_ != "PUNCT": #Count amount of 'real' words in a sentence
words_in_sentence += 1
if token.pos_ in counter_for_pos:
counter_for_pos[token.pos_] += 1
else:
counter_for_pos[token.pos_] = 1
counter += 1
if counter != tokens_in_sentence:
thewriter.writerow([token.text, token.pos_])
else:
thewriter.writerow([token.text, token.pos_, "Satzende"])
if words_in_sentence in count_length_of_different_sentences:
count_length_of_different_sentences[words_in_sentence] += 1
else:
count_length_of_different_sentences[words_in_sentence] = 1
#Hier schreiben wir die Häufigkeit der PoS Tags in eine extra Datei.
output_count_pos_tags_csv = str(output_count_pos_tags) + ".csv"
with open(output_count_pos_tags_csv, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Part-of-Speech', 'Anzahl'])
for pos_part in counter_for_pos:
thewriter.writerow([pos_part, counter_for_pos.get(pos_part)])
return count_length_of_different_sentences
def count_locations(all_texts):
#Diese Funktion zählt die Häufigkeiten von der Entität Locations
ent_dict = dict()
for text in all_texts:
doc = nlp(text)
for token in doc:
if token.ent_type_ != "":
if token.ent_type_ in ent_dict:
ent_dict[token.ent_type_] += 1
else:
ent_dict[token.ent_type_] = 1
locations_amount = ["LOC", ent_dict.get("LOC")]
return locations_amount
def get_tags_from_xml(abs_paths_to_xml_data):
#Diese Funktion erfüllt vieles für die 2.3
#Hier zählen wir die Häufigkeit der verschiedenen QsLink Typen
#Wir sammeln außerdem die Präpositionen, welche durch SPATIAL_SIGNAl angegeben werden
#Wir werten aus, welche Links durch welche Präpositionen getriggert werden und wie oft das passiert
#Und wir zählen hier die Anzahl der verschiedenen MOTION verben um die 5 häufigsten zu finden
read_all = 1
sub_tag_dict = dict()
sub_tag_QS_link_types = dict()
praeposition_triggers_for_qs_links = dict()
praeposition_triggers_for_os_links = dict()
count_motion_verb = dict()
for xml_file in abs_paths_to_xml_data:
tree = ElementTree.parse(xml_file)
root = tree.getroot()
list_with_qs_and_o_link_trigger_ids = []
list_of_spatial_signal_ids_and_words = []
for elem in root:
if elem.tag == 'TAGS':
for sub_tag in elem:
if sub_tag.tag in sub_tag_dict: #Counts all the different Types of Tags
sub_tag_dict[sub_tag.tag] += 1
else:
sub_tag_dict[sub_tag.tag] = 1
if sub_tag.tag == "QSLINK": #Counts all the different relTypes of QSLinks
#val = sub_tag.attrib['relType']
if sub_tag.attrib['relType'] in sub_tag_QS_link_types:
sub_tag_QS_link_types[sub_tag.attrib['relType']] += 1
else:
sub_tag_QS_link_types[sub_tag.attrib['relType']] = 1
if sub_tag.tag == "MOTION": #Counts all the different words for motion
if sub_tag.attrib['text'] in count_motion_verb:
count_motion_verb[sub_tag.attrib['text']] += 1
else:
count_motion_verb[sub_tag.attrib['text']] = 1
if sub_tag.tag == "QSLINK" or sub_tag.tag == "OLINK": #Here we start to collect the IDS
type_of_link = sub_tag.tag #for QS and OSlink matches
trigger_id = sub_tag.attrib["trigger"] #to find the trigger-praepositions
list_with_qs_and_o_link_trigger_ids.append([type_of_link, trigger_id])
if sub_tag.tag == "SPATIAL_SIGNAL":
trigger_id = sub_tag.attrib["id"]
word_trigger = sub_tag.attrib["text"]
list_of_spatial_signal_ids_and_words.append([trigger_id, word_trigger])
for potential_match in list_of_spatial_signal_ids_and_words:
for potential_signal_link in list_with_qs_and_o_link_trigger_ids:
if potential_match[0] == potential_signal_link[1]:
if potential_signal_link[0] == "QSLINK":
if potential_match[1] in praeposition_triggers_for_qs_links:
praeposition_triggers_for_qs_links[potential_match[1]] += 1
else:
praeposition_triggers_for_qs_links[potential_match[1]] = 1
else: #=OSLINK
if potential_match[1] in praeposition_triggers_for_os_links:
praeposition_triggers_for_os_links[potential_match[1]] += 1
else:
praeposition_triggers_for_os_links[potential_match[1]] = 1
double_list_with_qs_and_os_counted_trigger_lists = [praeposition_triggers_for_qs_links, praeposition_triggers_for_os_links]
if read_all == 0:
break
return_list = [sub_tag_dict, sub_tag_QS_link_types, double_list_with_qs_and_os_counted_trigger_lists, count_motion_verb]
return return_list
def write_counted_tags_and_loc_into_csv(tags_in_dict_counted, get_location_amount):
#Hier schreiben wir die in anderen Funktionen gezählten Tags und Locations in eine .csv-Datei
csv_name_for_counted_tags = "output_counted_tags_and_loc.csv"
with open(csv_name_for_counted_tags, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in tags_in_dict_counted:
if entry in ["SPATIAL_ENTITY", "PLACE", "MOTION", "SPATIAL_SIGNAL", "MOTION_SIGNAL","QSLINK", "OLINK"]:
thewriter.writerow([entry, tags_in_dict_counted.get(entry)])
thewriter.writerow(['Locations', get_location_amount[1]])
signal_number = tags_in_dict_counted['SPATIAL_SIGNAL'] + tags_in_dict_counted['MOTION_SIGNAL']
thewriter.writerow(['Signals', signal_number])
def write_counted_qslink_types_into_csv(dict_with_qs_link_types):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink Typen in eine .csv-Datei
csv_name_for_qs_link_types = "output_counted_qslink_types.csv"
with open(csv_name_for_qs_link_types, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Name:', 'Anzahl:'])
for entry in dict_with_qs_link_types:
if entry == "":
thewriter.writerow(["No Type specified", dict_with_qs_link_types.get(entry)])
else:
thewriter.writerow([entry, dict_with_qs_link_types.get(entry)])
def write_counted_qslink_and_oslink_praep_word_triggers_into_csv(list_with_dicts_for_qs_and_os_link_triggers):
# Hier schreiben wir die in anderen Funktionen gezählten QsLink und OsLink Trigger in eine .csv-Datei
csv_name = "output_co | unted_qs_and_os_link_praep_triggers.csv"
with open(csv_name, 'w', newline='') as myfile:
thewriter = csv.writer(myfile)
thewriter.writerow(['Linktyp:', 'QsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[0]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[0].get(entry)])
thewriter.writerow([''])
thewriter.writerow(['Linktyp:', 'OsLink'])
thewriter.writerow(['Triggerwort:', 'Anzahl der Triggerungen:'])
for entry in list_with_dicts_for_qs_and_os_link_triggers[1]:
thewriter.writerow([entry, list_with_dicts_for_qs_and_os_link_triggers[1].get(entry)])
def write_counted_ | identifier_body | |
conn.go | ) (*RUDPConn, error) {
c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient | else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = | {
c.clientRecv()
} | conditional_block |
conn.go | c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr |
// DialRUDP client dial server, building a relieable connection
func DialRUDP(localAddr, remoteAddr *net.UDPAddr) (*RUDPConn, error) { | random_line_split | |
conn.go | ("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) send() {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = connStatusConnecting
c.recvPacketChannel = make(chan *packet, 1<<5)
c.sendPacketChannel = make(chan *packet, 1<<5)
c.rawUDPDataChan = make(chan []byte, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
go c.errWatcher()
// net io
go c.send()
go c.recv()
go c.resend()
go c.packetHandler()
return c, nil
}
func (c *RUDPConn) Read(b []byte) (int, error) | {
readCnt := len(b)
n := len(b)
if n == 0 {
return 0, nil
}
curWrite := 0
if len(c.outputDataTmpBuffer) != 0 {
if n <= len(c.outputDataTmpBuffer) {
copy(b, c.outputDataTmpBuffer[:n])
c.outputDataTmpBuffer = c.outputDataTmpBuffer[n:]
return readCnt, nil
} else {
n -= len(c.outputDataTmpBuffer)
curWrite += len(c.outputDataTmpBuffer)
copy(b, c.outputDataTmpBuffer)
}
}
for n > 0 { | identifier_body | |
conn.go | ) (*RUDPConn, error) {
c := &RUDPConn{}
c.localAddr = localAddr
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeClient
c.sendSeqNumber = 0
c.recvPacketChannel = make(chan *packet, 1<<5)
c.rudpConnStatus = connStatusConnecting
if err := c.clientBuildConn(); err != nil {
return nil, err
}
c.sendPacketChannel = make(chan *packet, 1<<5)
c.resendPacketQueue = newPacketList(packetListOrderBySeqNb)
c.outputPacketQueue = newPacketList(packetListOrderBySeqNb)
c.rudpConnStatus = connStatusOpen
c.sendTickNano = defaultSendTickNano
c.sendTickModifyEvent = make(chan int32, 1)
c.heartBeatCycleMinute = defaultHeartBeatCycleMinute
c.lastSendTs = time.Now().Unix()
c.lastRecvTs = time.Now().Unix()
c.sendStop = make(chan error, 1)
c.recvStop = make(chan error, 1)
c.resendStop = make(chan error, 1)
c.packetHandlerStop = make(chan error, 1)
c.heartbeatStop = make(chan error, 1)
c.errBus = make(chan error, 1)
c.closeConnCallback = func() {
c.rudpConnStatus = connStatusClose
c.errBus <- io.EOF
}
// monitor errBus
go c.errWatcher()
// net io
go c.recv()
go c.send()
go c.resend()
go c.packetHandler()
// client need to keep a heart beat
go c.keepLive()
log("build the RUDP connection succ!\n")
return c, nil
}
func (c *RUDPConn) errWatcher() {
err := <-c.errBus
log("errBus recv error: %v\n", err)
c.err = err
c.resendStop <- err
c.recvStop <- err
c.sendStop <- err
if c.rudpConnType == connTypeClient {
c.heartbeatStop <- err
}
if c.rudpConnType == connTypeServer {
c.closeConnCallbackListener()
}
}
func (c *RUDPConn) keepLive() {
ticker := time.NewTicker(time.Duration(c.heartBeatCycleMinute) * 60 * time.Second)
defer ticker.Stop()
select {
case <-ticker.C:
now := time.Now().Unix()
if now-c.lastSendTs >= int64(c.heartBeatCycleMinute)*60 {
c.sendPacketChannel <- newPinPacket()
}
case <-c.heartbeatStop:
return
}
}
func (c *RUDPConn) recv() {
if c.rudpConnType == connTypeClient {
c.clientRecv()
} else if c.rudpConnType == connTypeServer {
c.serverRecv()
}
}
func (c *RUDPConn) clientRecv() {
for {
select {
case <-c.recvStop:
return
default:
buf := make([]byte, rawUDPPacketLenLimit)
n, err := c.rawUDPConn.Read(buf)
if err != nil {
c.errBus <- err
return
}
buf = buf[:n]
apacket, err := unmarshalRUDPPacket(buf)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
func (c *RUDPConn) serverRecv() {
for {
select {
case <-c.recvStop:
return
case data := <-c.rawUDPDataChan:
apacket, err := unmarshalRUDPPacket(data)
if err != nil {
c.errBus <- resolveRUDPSegmentErr
return
}
c.recvPacketChannel <- apacket
}
}
}
// handle the recv packets
func (c *RUDPConn) packetHandler() {
for {
select {
case <-c.packetHandlerStop:
return
case apacket := <-c.recvPacketChannel:
switch apacket.segmentType {
case rudpSegmentTypeNormal:
if apacket.seqNumber <= c.maxHasReadSeqNumber {
// discard
continue
}
c.outputPacketQueue.putPacket(apacket)
// ack
c.sendPacketChannel <- newAckPacket(apacket.seqNumber)
case rudpSegmentTypeAck:
log("ack %d\n", apacket.ackNumber)
c.resendPacketQueue.removePacketByNb(apacket.ackNumber)
case rudpSegmentTypeFin:
c.errBus <- io.EOF
return
case rudpSegmentTypePin:
// do nothing
case rudpSegmentTypeConn:
if c.rudpConnType != connTypeServer {
continue
}
// server send CON ack segment
segment := newConAckPacket(apacket.seqNumber).marshal()
n, err := c.write(segment)
if err != nil {
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// build conn
log("server send CON-ACK segment\n")
c.rudpConnStatus = connStatusOpen
c.buildConnCallbackListener()
}
c.lastRecvTs = time.Now().Unix()
}
}
}
func (c *RUDPConn) | () {
ticker := time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
defer ticker.Stop()
for {
select {
case c.sendTickNano = <-c.sendTickModifyEvent:
ticker.Stop()
ticker = time.NewTicker(time.Duration(c.sendTickNano) * time.Nanosecond)
case <-c.sendStop:
return
case <-ticker.C:
c.sendPacket()
c.lastSendTs = time.Now().Unix()
}
}
}
// SetRealSendTick modify the segment sending cycle
func (c *RUDPConn) SetSendTick(nano int32) {
c.sendTickModifyEvent <- nano
}
func (c *RUDPConn) write(data []byte) (n int, err error) {
if c.rudpConnType == connTypeServer {
n, err = c.rawUDPConn.WriteTo(data, c.remoteAddr)
} else {
n, err = c.rawUDPConn.Write(data)
}
return
}
func (c *RUDPConn) sendPacket() {
apacket := <-c.sendPacketChannel
segment := apacket.marshal()
n, err := c.write(segment)
if err != nil {
log("sendPacket error: %v, %d", err, len(segment))
c.errBus <- err
return
}
if n != len(segment) {
c.errBus <- errors.New(RawUDPSendNotComplete)
return
}
// apacket.print()
// only the normal segment possiblely needs to resend
if apacket.segmentType == rudpSegmentTypeNormal {
c.resendPacketQueue.putPacket(apacket)
}
}
func (c *RUDPConn) clientBuildConn() error {
// just init instance
udpConn, err := net.DialUDP("udp", c.localAddr, c.remoteAddr)
if err != nil {
return err
}
c.rawUDPConn = udpConn
c.rawUDPConn.SetWriteBuffer(65528)
// send conn segment
connSeqNb := c.sendSeqNumber
c.sendSeqNumber++
connSegment := newConPacket(connSeqNb).marshal()
n, err := udpConn.Write(connSegment)
if err != nil {
return err
}
if n != len(connSegment) {
return errors.New(RawUDPSendNotComplete)
}
log("client send the CONN segment\n")
// wait the server ack conn segment
// may the server's ack segment and normal segment out-of-order
// so if the recv not the ack segment, we try to wait the next
for cnt := 0; cnt < maxWaitSegmentCntWhileConn; cnt++ {
buf := make([]byte, rawUDPPacketLenLimit)
n, err = udpConn.Read(buf)
if err != nil {
return err
}
recvPacket, err := unmarshalRUDPPacket(buf[:n])
if err != nil {
return errors.New("analyze the recvSegment error: " + err.Error())
}
if recvPacket.ackNumber == connSeqNb && recvPacket.segmentType == rudpSegmentTypeConnAck {
// conn OK
log("client recv the server CON-ACK segment\n")
return nil
} else {
//c.recvPacketChannel <- recvPacket
continue
}
}
return nil
}
func serverBuildConn(rawUDPConn *net.UDPConn, remoteAddr *net.UDPAddr) (*RUDPConn, error) {
c := &RUDPConn{}
c.rawUDPConn = rawUDPConn
c.rawUDPConn.SetWriteBuffer(65528)
c.localAddr, _ = net.ResolveUDPAddr(rawUDPConn.LocalAddr().Network(), rawUDPConn.LocalAddr().String())
c.remoteAddr = remoteAddr
c.rudpConnType = connTypeServer
c.sendSeqNumber = 0
c.rudpConnStatus = | send | identifier_name |
plotXsectLimitComparison.py | os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option( | )
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. | "--ymax",
type=float,
help="manually sepcify the y scale",
default=None, | random_line_split |
plotXsectLimitComparison.py | .path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
|
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
"--fermiophobic",
dest="fermiophobicMode",
help="exclude gluon fusion and ttbar associated production and rescale the other two processes such that " + \
"their total cross section corresponds to the previous total",
default=False,
action="store_true",
)
parser.add_option(
"--save",
dest="outputFnames",
help="save the plot into a file (can be specified multiple times)",
default=[],
action="append",
)
parser.add_option(
"--plotLog",
help="plots y in log scale",
default=False,
action="store_true",
)
parser.add_option(
"--ymax",
type=float,
help="manually sepcify the y scale",
default=None,
)
parser.add_option(
"--relative",
help="instead of plotting the absolute cross section exclusions, plot the relative (w.r.t. to the input signal)",
default=False,
action="store_true",
)
parser.add_option(
"--isabs",
help="specify that a given file contains ABSOLUTE rather than RELATIVE (w.r.t. to the standard cross section) limits. | typeName = "FP" | conditional_block |
plotXsectLimitComparison.py |
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
from optparse import OptionParser
parser = OptionParser("""
%prog [options] csv_file1 csv_file2 [ ... ]
compares two or more cross section ratio limit
output files on one plot
"""
)
parser.add_option(
| """ @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames: | identifier_body | |
plotXsectLimitComparison.py | (csvFnames, relative, includeExpected = True, fermiophobic = None, ymax = None, inputIsAbs = None, drawXsectBR = False,
minMass = None,
maxMass = None,
plotLog = False
):
""" @param relative if true, the exclusion of the ratios
(relative to the inputs given) are plotted. If False,
these ratios are multiplied by the SM cross sections
and branching ratios into gamma gamma
@param inputIsAbs is a list (set) of file names which
should be treated as if they had ABSOLUTE limits on
cross sections rather than relative limits.
@param minMass and masMass can be used to restrict the
plotting range
"""
#--------------------
# read the files
#--------------------
data = []
color = 0
for fname in csvFnames:
while True:
color += 1
if color not in colorsToAvoid:
break
# define a name: if there is a comma in the file name assume that the
# part before the comma is the actual file name and the part after it
# is the label we should use
#
# if there is no comma, just use the basename (without .csv) as label
pos = fname.find(',')
if pos == -1:
# not found
name = os.path.basename(fname)
if name.lower().endswith(".csv"):
name = name[:-4]
else:
name = fname[pos+1:]
fname = fname[:pos]
masses, observedValues, expectedValues, \
expected_minus_2_sigma_values, \
expected_minus_1_sigma_values, \
expected_plus_1_sigma_values, \
expected_plus_2_sigma_values = PlotUtils.readCSV(open(fname), includeExpected)
#--------------------
# filter on masses
#--------------------
indices = range(len(masses))
if minMass != None:
indices = [ i for i in indices if masses[i] >= minMass ]
if maxMass != None:
indices = [ i for i in indices if masses[i] <= maxMass ]
masses = [ masses[i] for i in indices ]
observedValues = [ observedValues[i] for i in indices ]
expectedValues = [ expectedValues[i] for i in indices ]
#--------------------
tmp = { "masses": masses,
"observedValues": observedValues,
"expectedValues": expectedValues,
# for labels
"name": name,
# assign the color here
"color": color,
}
data.append(tmp)
#--------------------
# just to make sure we're not picking up something in the code afterwards
del masses
del observedValues
del expectedValues
#--------------------
if not relative:
# if we're plotting the absolute cross sections, we
# need to know whether this is Standard Model or Fermiophobic
assert(fermiophobic != None)
if fermiophobic:
typeName = "FP"
else:
typeName = "SM"
# convert to absolute cross sections
for line, fname in zip(data, csvFnames):
if fname in inputIsAbs:
# already absolute
continue
line['observedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.multiplyArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
else:
# we're asked to plot relative results, convert to relative for those
# inputs which are absolute
for line, fname in zip(data, csvFnames):
if not fname in inputIsAbs:
# relative input, no need to convert
continue
line['observedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['observedValues'], fermiophobic)
line['expectedValues'] = PlotUtils.divideArrayByXsectAndBR(line['masses'], line['expectedValues'], fermiophobic)
#----------------------------------------
# legend
#----------------------------------------
legend = ROOT.TLegend(options.legendXleft, options.legendYbottom,
options.legendXright,options.legendYtop); gcSaver.append(legend)
legend.SetShadowColor(0);
legend.SetFillColor(0);
legend.SetBorderSize(0);
#----------------------------------------
# produce the 'observed' graphs
#----------------------------------------
allGraphs = []
for line in data:
gr = PlotUtils.makeGraphFromArrays(line['masses'], line['observedValues'])
line['grObserved'] = gr
gcSaver.append(gr)
if options.observedLineWidth > 0:
gr.SetLineWidth(options.observedLineWidth)
else:
# set default width for legend
gr.SetLineWidth(4)
gr.SetLineColor(line['color'])
legend.AddEntry(gr,line['name'],"L")
if options.observedLineWidth > 0:
allGraphs.append(gr)
#----------------------------------------
# produce the 'expected' graphs
#----------------------------------------
if includeExpected:
for line in data:
grExpected = PlotUtils.makeGraphFromArrays(line['masses'], line['expectedValues'])
gcSaver.append(grExpected)
line['grExpected'] = grExpected
grExpected.SetLineStyle(ROOT.kDashed)
grExpected.SetLineWidth(4)
grExpected.SetLineColor(line['color'])
allGraphs.append(grExpected)
# label = makeGraphLabelOnRight(grExpected, minMass, maxMass, "BG exp.")
# label.SetTextSize(label.GetTextSize() * 0.7)
# label.Draw()
# gcSaver.append(label)
#myCanvas = ROOT.TCanvas("myCanvas","Title Goes Here")
#myCanvas.SetLogy(plotLog)
#----------------------------------------
# produce the graph for the theoretical cross section
#----------------------------------------
if drawXsectBR:
# add a graph for the theoretical cross section
# take the 'or' of all masses given
import operator
allMasses = sorted(reduce(operator.add, [ line['masses'] for line in data ] ))
# for the moment, limit this to integer masses (in GeV)
# (the cross section interpolation seems not yet to be functional)
allMasses = sorted(list(set([ int(round(x)) for x in allMasses ])))
# print "allMasses=",allMasses
theoXsectBr = [ PlotUtils.getXsectTimesBR(mass, fermiophobic) for mass in allMasses ]
gr = PlotUtils.makeGraphFromArrays(allMasses, theoXsectBr)
gr.SetLineWidth(4)
gr.SetLineStyle(ROOT.kDotted)
legend.AddEntry(gr,"theo. #sigma * BR","L")
gcSaver.append(gr)
allGraphs.append(gr)
#----------------------------------------
# determine the y scale
#----------------------------------------
if ymax == None:
# determine this from the values, not from the graphs
# (is there a way to do this from the graphs ?)
ymax = max([value for line in data for value in line['observedValues'] ])
if includeExpected:
ymax = max(ymax, max([value for line in data for value in line['expectedValues'] ]))
ymax *= 1.1
# TODO: remove this if statement ?!
if not relative:
if fermiophobic:
# fix the y scale by hand in order not to
# stretch it too much because of large
# scaling factors for the theoretical expectation
ymax = 0.5
#----------------------------------------
# determine x scale (mass range)
#----------------------------------------
allMasses = [value for line in data for value in line['masses'] ]
actualMinMass = min(allMasses)
actualMaxMass = max(allMasses)
del allMasses
#----------------------------------------
# create a dummy histogram to set the x range
hdummy = ROOT.TH1F("hdummy","",1,actualMinMass,actualMaxMass)
gcSaver.append(hdummy)
hdummy.SetMaximum(ymax)
hdummy.Draw()
ROOT.gStyle.SetOptTitle(0)
#----------------------------------------
# draw the graphs
#----------------------------------------
for gr in allGraphs:
gr.Draw("C,same")
#gr.Draw("L,same")
#----------------------------------------
ROOT.gStyle.SetOptStat(0)
hdummy.SetXTitle("m_{H} [GeV/c^{2}]")
hdummy.GetYaxis().SetTitleOffset(1.2 * hdummy.GetYaxis().GetTitleOffset())
if relative:
hdummy.SetYTitle("#sigma/#sigma(theo)")
else:
hdummy.SetYTitle("#sigma(%s) * BR(#gamma#gamma) [pb]" % typeName)
ROOT.gPad.SetGrid()
if options.showTitle:
label = ROOT.TLatex(0.5,0.85,"Excluded at 95% CL.")
gcSaver.append(label)
label.SetNDC(1)
label.SetTextAlign(21)
label.Draw()
legend.Draw()
ROOT.gPad.Modified()
ROOT.gPad.Modified()
# | makePlot | identifier_name | |
test.rs | ()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() | {
test::common_tests::async_echo(TestServer::new, TestServer::client)
} | identifier_body | |
test.rs | fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore::empty();
let ca_cert = include_bytes!("tls_ca_cert.der").to_vec();
root_store.add(&Certificate(ca_cert)).unwrap();
let cfg = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
Self {
addr,
config: Arc::new(cfg),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test::helper::TestHandler;
use crate::test::{self, async_test, Server};
use tokio::sync::oneshot;
#[test]
fn test_server_serves_requests() {
test::common_tests::serves_requests(TestServer::new, TestServer::client)
}
#[test]
fn test_server_times_out() {
test::common_tests::times_out(TestServer::with_timeout, TestServer::client)
}
#[test]
fn test_server_async_echo() {
test::common_tests::async_echo(TestServer::new, TestServer::client)
}
#[test]
fn test_server_supports_multiple_servers() {
test::common_tests::supports_multiple_servers(TestServer::new, TestServer::client)
}
#[test]
fn test_server_spawns_and_runs_futures() {
let server = TestServer::new(TestHandler::default()).unwrap();
let (sender, spawn_receiver) = oneshot::channel();
let (spawn_sender, run_receiver) = oneshot::channel();
sender.send(1).unwrap();
server.spawn(async move {
assert_eq!(1, spawn_receiver.await.unwrap());
spawn_sender.send(42).unwrap();
});
assert_eq!(42, server.run_future(run_receiver).unwrap());
}
#[test]
fn test_server_adds_client_address_to_state() {
test::common_tests::adds_client_address_to_state(TestServer::new, TestServer::client);
}
#[tokio::test]
async fn async_test_server_serves_requests() {
async_test::common_tests::serves_requests(AsyncTestServer::new, AsyncTestServer::client)
.await;
}
#[tokio::test]
async fn async_test_server_times_out() {
async_test::common_tests::times_out(
AsyncTestServer::new_with_timeout,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn async_test_server_echo() {
async_test::common_tests::echo(AsyncTestServer::new, AsyncTestServer::client).await;
}
#[tokio::test]
async fn async_test_server_supports_multiple_servers() {
async_test::common_tests::supports_multiple_servers(
AsyncTestServer::new,
AsyncTestServer::client,
)
.await;
}
#[tokio::test]
async fn | async_test_server_adds_client_address_to_state | identifier_name | |
test.rs | = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") | else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr;
async move {
match TcpStream::connect(address).await {
Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore:: | {
tcp.connected().negotiated_h2()
} | conditional_block |
test.rs | = Certificate(include_bytes!("tls_cert.der").to_vec());
let key = PrivateKey(include_bytes!("tls_key.der").to_vec());
ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(vec![cert], key)
.expect("Unable to create TLS server config")
}
/// The `TestServer` type, which is used as a harness when writing test cases for Hyper services
/// (which Gotham's `Router` is). An instance of `TestServer` is run asynchronously within the
/// current thread, and is only accessible by a client returned from the `TestServer`.
///
/// # Examples
///
/// ```rust
/// # extern crate hyper;
/// # extern crate gotham;
/// #
/// # use gotham::state::State;
/// # use hyper::{Body, Response, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # fn main() {
/// use gotham::tls::test::TestServer;
///
/// let test_server = TestServer::new(|| Ok(my_handler)).unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct TestServer {
data: Arc<TestServerData>,
}
impl test::Server for TestServer {
fn run_future<F, O>(&self, future: F) -> O
where
F: Future<Output = O>,
{
self.data.run_future(future)
}
fn request_expiry(&self) -> Sleep {
self.data.request_expiry()
}
}
impl TestServer {
/// Creates a `TestServer` instance for the `Handler` spawned by `new_handler`. This server has
/// the same guarantee given by `hyper::server::Http::bind`, that a new service will be spawned
/// for each connection.
///
/// Timeout will be set to 10 seconds.
pub fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<TestServer> {
TestServer::with_timeout(new_handler, 10)
}
/// Sets the request timeout to `timeout` seconds and returns a new `TestServer`.
pub fn with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: u64,
) -> anyhow::Result<TestServer> {
let cfg = server_config();
let data = TestServerData::new(new_handler, timeout, rustls_wrap(cfg))?;
Ok(TestServer {
data: Arc::new(data),
})
}
/// Returns a client connected to the `TestServer`. The transport is handled internally.
pub fn client(&self) -> TestClient<Self, TestConnect> {
self.data.client(self)
}
/// Spawns the given future on the `TestServer`'s internal runtime.
/// This allows you to spawn more futures ontop of the `TestServer` in your
/// tests.
pub fn spawn<F>(&self, future: F)
where
F: Future<Output = ()> + Send + 'static,
{
self.data.spawn(future)
}
}
/// An [`AsyncTestServer`], that can be used for testing requests against a server in asynchronous contexts.
/// The [`AsyncTestServer`] runs in the runtime where it is created and an [`AsyncTestClient`] can be
/// created to make asynchronous requests to it.
///
/// This differs from [`crate::plain::test::TestServer`] in that it doesn't come with it's own runtime and therefore
/// doesn't crash when used inside of another runtime.
///
/// # Example
///
/// ```rust
/// # use gotham::state::State;
/// # use hyper::{Response, Body, StatusCode};
/// #
/// # fn my_handler(state: State) -> (State, Response<Body>) {
/// # (state, Response::builder().status(StatusCode::ACCEPTED).body(Body::empty()).unwrap())
/// # }
/// #
/// # #[tokio::main]
/// # async fn main() {
/// use gotham::tls::test::AsyncTestServer;
///
/// let test_server = AsyncTestServer::new(|| Ok(my_handler)).await.unwrap();
///
/// let response = test_server.client().get("http://localhost/").perform().await.unwrap();
/// assert_eq!(response.status(), StatusCode::ACCEPTED);
/// # }
/// ```
#[derive(Clone)]
pub struct AsyncTestServer {
inner: Arc<AsyncTestServerInner>,
}
impl AsyncTestServer {
/// Creates an [`AsyncTestServer`] instance for the [`crate::handler::Handler`](`Handler`) spawned by `new_handler`. This server has
/// the same guarantee given by [`hyper::server::Server::bind`], that a new service will be spawned
/// for each connection.
///
/// Requests will time out after 10 seconds by default. Use [`AsyncTestServer::with_timeout`] for a different timeout.
pub async fn new<NH: NewHandler + 'static>(new_handler: NH) -> anyhow::Result<AsyncTestServer> {
AsyncTestServer::new_with_timeout(new_handler, Duration::from_secs(10)).await
}
/// Sets the request timeout to `timeout` seconds and returns a new [`AsyncTestServer`].
pub async fn new_with_timeout<NH: NewHandler + 'static>(
new_handler: NH,
timeout: Duration,
) -> anyhow::Result<AsyncTestServer> {
let cfg = server_config();
let inner = AsyncTestServerInner::new(new_handler, timeout, rustls_wrap(cfg)).await?;
Ok(AsyncTestServer {
inner: Arc::new(inner),
})
}
/// Returns a client connected to the [`AsyncTestServer`]. It can be used to make requests against the test server.
/// The transport is handled internally.
pub fn client(&self) -> AsyncTestClient<crate::tls::test::TestConnect> {
self.inner.client()
}
}
#[allow(missing_docs)]
#[pin_project]
pub struct TlsConnectionStream<IO>(#[pin] TlsStream<IO>);
impl<IO: AsyncRead + AsyncWrite + Connection + Unpin> Connection for TlsConnectionStream<IO> {
fn connected(&self) -> Connected {
let (tcp, tls) = self.0.get_ref();
if tls.alpn_protocol() == Some(b"h2") {
tcp.connected().negotiated_h2()
} else {
tcp.connected()
}
}
}
impl<IO> AsyncRead for TlsConnectionStream<IO>
where
IO: AsyncRead + AsyncWrite + Unpin,
{
#[inline]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<Result<(), io::Error>> {
self.project().0.poll_read(cx, buf)
}
}
impl<IO: AsyncRead + AsyncWrite + Unpin> AsyncWrite for TlsConnectionStream<IO> {
#[inline]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, io::Error>> {
self.project().0.poll_write(cx, buf)
}
#[inline]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_flush(cx)
}
#[inline]
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
self.project().0.poll_shutdown(cx)
}
}
/// `TestConnect` represents the connection between a test client and the `TestServer` instance
/// that created it. This type should never be used directly.
#[derive(Clone)]
pub struct TestConnect {
pub(crate) addr: SocketAddr,
pub(crate) config: Arc<rustls::ClientConfig>,
}
impl Service<Uri> for TestConnect {
type Response = TlsConnectionStream<TcpStream>;
type Error = tokio::io::Error;
type Future = BoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Ok(()).into()
}
fn call(&mut self, req: Uri) -> Self::Future {
let tls = TlsConnector::from(self.config.clone());
let address = self.addr; | Ok(stream) => {
let domain = ServerName::try_from(req.host().unwrap()).unwrap();
match tls.connect(domain, stream).await {
Ok(tls_stream) => {
info!("Client TcpStream connected: {:?}", tls_stream);
Ok(TlsConnectionStream(tls_stream))
}
Err(error) => {
info!("TLS TestClient error: {:?}", error);
Err(error)
}
}
}
Err(error) => Err(error),
}
}
.boxed()
}
}
impl From<SocketAddr> for TestConnect {
fn from(addr: SocketAddr) -> Self {
let mut root_store = RootCertStore:: |
async move {
match TcpStream::connect(address).await { | random_line_split |
databases.go | TableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) | {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
} | identifier_body | |
databases.go | TableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
}
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil | {
return err
} | conditional_block | |
databases.go | (ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will
// handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database.
GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name event is required for renaming of an event.
UpdateEvent(ctx *Context, originalName string, ed EventDefinition) error
// TODO: add ExecuteEvent() method that executes given event and updates the LastExecutedAt value
}
// ViewDatabase is implemented by databases that persist view definitions
type ViewDatabase interface {
// CreateView persists the definition a view with the name and select statement given. If a view with that name
// already exists, should return ErrExistingView
CreateView(ctx *Context, name string, selectStatement, createViewStmt string) error
// DropView deletes the view named from persistent storage. If the view doesn't exist, should return
// ErrViewDoesNotExist
DropView(ctx *Context, name string) error
// GetViewDefinition returns the ViewDefinition of the view with the name given, or false if it doesn't exist.
GetViewDefinition(ctx *Context, viewName string) (ViewDefinition, bool, error)
// AllViews returns the definitions of all views in the database
AllViews(ctx *Context) ([]ViewDefinition, error)
}
// ViewDefinition is the named textual definition of a view
type ViewDefinition struct {
Name string
TextDefinition string
CreateViewStatement string
SqlMode string
}
// GetTableInsensitive implements a case-insensitive map lookup for tables keyed off of the table name.
// Looks for exact matches first. If no exact matches are found then any table matching the name case insensitively
// should be returned. If there is more than one table that matches a case-insensitive comparison the resolution
// strategy is not defined.
func GetTableInsensitive(tblName string, tables map[string]Table) (Table, bool) {
if tbl, ok := tables[tblName]; ok {
return tbl, true
}
lwrName := strings.ToLower(tblName)
for k, tbl := range tables {
if lwrName == strings.ToLower(k) {
return tbl, true
}
}
return nil, false
}
// GetTableNameInsensitive implements a case-insensitive search of a slice of table names. It looks for exact matches
// first. If no exact matches are found then any table matching the name case insensitively should be returned. If
// there is more than one table that matches a case-insensitive comparison the resolution strategy is not defined.
func GetTableNameInsensitive(tblName string, tableNames []string) (string, bool) {
for _, name := range tableNames {
if tblName == name {
return name, true
}
}
lwrName := strings.ToLower(tblName)
for _, name := range tableNames {
if lwrName == strings.ToLower(name) {
return name, true
}
}
return "", false
}
// DBTableIter iterates over all tables returned by db.GetTableNames() calling cb for each one until all tables have
// been processed, or an error is returned from the callback, or the cont flag is false when returned from the callback.
func DBTableIter(ctx *Context, db Database, cb func(Table) (cont bool, err error)) error {
names, err := db.GetTableNames(ctx)
if err != nil {
return err
}
for _, name := range names {
tbl, ok, err := db.GetTableInsensitive(ctx, name)
if err != nil {
return err
} else if !ok {
return ErrTableNotFound.New(name)
}
cont, err := cb(tbl)
if err != nil {
return err
}
if !cont {
break
}
}
return nil
}
// UnresolvedDatabase is a database which has not been resolved yet.
type UnresolvedDatabase string
var _ Database = UnresolvedDatabase("")
// Name returns the database name.
func (d UnresolvedDatabase) Name() string {
return string(d)
}
// Tables returns the tables in the database.
func (UnresolvedDatabase) Tables() map[string]Table {
return make(map[string]Table)
}
func (UnresolvedDatabase) | GetTableInsensitive | identifier_name | |
databases.go | type CollatedDatabaseProvider interface {
MutableDatabaseProvider
// CreateCollatedDatabase creates a collated database and adds it to the provider's collection.
CreateCollatedDatabase(ctx *Context, name string, collation CollationID) error
}
// TableFunctionProvider is an interface that allows custom table functions to be provided. It's usually (but not
// always) implemented by a DatabaseProvider.
type TableFunctionProvider interface {
// TableFunction returns the table function with the name provided, case-insensitive
TableFunction(ctx *Context, name string) (TableFunction, error)
}
// Database represents the database. Its primary job is to provide access to all tables.
type Database interface {
Nameable
// GetTableInsensitive retrieves a table by its case-insensitive name. To be SQL compliant, databases should not
// allow two tables with the same case-insensitive name. Behavior is undefined when two tables have the same
// case-insensitive name.
GetTableInsensitive(ctx *Context, tblName string) (Table, bool, error)
// GetTableNames returns the table names of every table in the database. It does not return the names of temporary
// tables
GetTableNames(ctx *Context) ([]string, error)
}
// Databaser is a node that contains a reference to a database.
type Databaser interface {
// Database the current database.
Database() Database
// WithDatabase returns a new node instance with the database replaced with
// the one given as parameter.
WithDatabase(Database) (Node, error)
}
// Databaseable is a node with a string reference to a database
type Databaseable interface {
Database() string
}
// MultiDatabaser is a node that contains a reference to a database provider. This interface is intended for very
// specific nodes that must resolve databases during execution time rather than during analysis, such as block
// statements where the execution of a nested statement in the block may affect future statements within that same block.
type MultiDatabaser interface {
// DatabaseProvider returns the current DatabaseProvider.
DatabaseProvider() DatabaseProvider
// WithDatabaseProvider returns a new node instance with the database provider replaced with the one given as parameter.
WithDatabaseProvider(DatabaseProvider) (Node, error)
}
// ReadOnlyDatabase is an extension of Database that may declare itself read-only, which will disallow any DDL or DML
// statements from executing.
type ReadOnlyDatabase interface {
Database
// IsReadOnly returns whether this database is read-only.
IsReadOnly() bool
}
// TableCreator is a Database that can create new tables.
type TableCreator interface {
Database
// CreateTable creates the table with the given name and schema.
CreateTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// IndexedTableCreator is a Database that can create new tables which have a Primary Key with columns that have
// prefix lengths.
type IndexedTableCreator interface {
Database
// CreateIndexedTable creates the table with the given name and schema using the index definition provided for its
// primary key index.
CreateIndexedTable(ctx *Context, name string, schema PrimaryKeySchema, idxDef IndexDef, collation CollationID) error
}
// TemporaryTableCreator is a database that can create temporary tables that persist only as long as the session.
// Note that temporary tables with the same name as persisted tables take precedence in most SQL operations.
type TemporaryTableCreator interface {
Database
// CreateTemporaryTable creates the table with the given name and schema. If a temporary table with that name already exists, must
// return sql.ErrTableAlreadyExists
CreateTemporaryTable(ctx *Context, name string, schema PrimaryKeySchema, collation CollationID) error
}
// TableDropper is a Datagbase that can drop tables.
type TableDropper interface {
Database
DropTable(ctx *Context, name string) error
}
// TableRenamer is a database that can rename tables.
type TableRenamer interface {
Database
// RenameTable renames a table from oldName to newName as given.
RenameTable(ctx *Context, oldName, newName string) error
}
// VersionedDatabase is a Database that can return tables as they existed at different points in time. The engine
// supports queries on historical table data via the AS OF construct introduced in SQL 2011.
type VersionedDatabase interface {
Database
// GetTableInsensitiveAsOf retrieves a table by its case-insensitive name with the same semantics as
// Database.GetTableInsensitive, but at a particular revision of the database. Implementors must choose which types
// of expressions to accept as revision names.
GetTableInsensitiveAsOf(ctx *Context, tblName string, asOf interface{}) (Table, bool, error)
// GetTableNamesAsOf returns the table names of every table in the database as of the revision given. Implementors
// must choose which types of expressions to accept as revision names.
GetTableNamesAsOf(ctx *Context, asOf interface{}) ([]string, error)
}
// CollatedDatabase is a Database that can store and update its collation.
type CollatedDatabase interface {
Database
// GetCollation returns this database's collation.
GetCollation(ctx *Context) CollationID
// SetCollation updates this database's collation.
SetCollation(ctx *Context, collation CollationID) error
}
// TriggerDatabase is a Database that supports creating and storing triggers. The engine handles all parsing and
// execution logic for triggers. Integrators are not expected to parse or understand the trigger definitions, but must
// store and return them when asked.
type TriggerDatabase interface {
Database
// GetTriggers returns all trigger definitions for the database
GetTriggers(ctx *Context) ([]TriggerDefinition, error)
// CreateTrigger is called when an integrator is asked to create a trigger. The CREATE TRIGGER statement string is
// provided to store, along with the name of the trigger.
CreateTrigger(ctx *Context, definition TriggerDefinition) error
// DropTrigger is called when a trigger should no longer be stored. The name has already been validated.
// Returns ErrTriggerDoesNotExist if the trigger was not found.
DropTrigger(ctx *Context, name string) error
}
// TriggerDefinition defines a trigger. Integrators are not expected to parse or understand the trigger definitions,
// but must store and return them when asked.
type TriggerDefinition struct {
// The name of this trigger. Trigger names in a database are unique.
Name string
// The text of the statement to create this trigger.
CreateStatement string
// The time that the trigger was created.
CreatedAt time.Time
// SqlMode holds the SQL_MODE that was in use when this trigger was originally defined. It contains information
// needed for how to parse the trigger's SQL, such as whether ANSI_QUOTES mode is enabled.
SqlMode string
}
// TemporaryTableDatabase is a database that can query the session (which manages the temporary table state) to
// retrieve the name of all temporary tables.
type TemporaryTableDatabase interface {
// GetAllTemporaryTables returns the names of all temporary tables in the session.
GetAllTemporaryTables(ctx *Context) ([]Table, error)
}
// TableCopierDatabase is a database that can copy a source table's data (without preserving indexed, fks, etc.) into
// another destination table.
type TableCopierDatabase interface {
// CopyTableData copies the sourceTable data to the destinationTable and returns the number of rows copied.
CopyTableData(ctx *Context, sourceTable string, destinationTable string) (uint64, error)
}
// StoredProcedureDatabase is a database that supports the creation and execution of stored procedures. The engine will
// handle all parsing and execution logic for stored procedures. Integrators only need to store and retrieve
// StoredProcedureDetails, while verifying that all stored procedures have a unique name without regard to
// case-sensitivity.
type StoredProcedureDatabase interface {
Database
// GetStoredProcedure returns the desired StoredProcedureDetails from the database.
GetStoredProcedure(ctx *Context, name string) (StoredProcedureDetails, bool, error)
// GetStoredProcedures returns all StoredProcedureDetails for the database.
GetStoredProcedures(ctx *Context) ([]StoredProcedureDetails, error)
// SaveStoredProcedure stores the given StoredProcedureDetails to the database. The integrator should verify that
// the name of the new stored procedure is unique amongst existing stored procedures.
SaveStoredProcedure(ctx *Context, spd StoredProcedureDetails) error
// DropStoredProcedure removes the StoredProcedureDetails with the matching name from the database.
DropStoredProcedure(ctx *Context, name string) error
}
// EventDatabase is a database that supports the creation and execution of events. The engine will | GetEvent(ctx *Context, name string) (EventDefinition, bool, error)
// GetEvents returns all EventDetails for the database.
GetEvents(ctx *Context) ([]EventDefinition, error)
// SaveEvent stores the given EventDetails to the database. The integrator should verify that
// the name of the new event is unique amongst existing stored procedures.
SaveEvent(ctx *Context, ed EventDefinition) error
// DropEvent removes the EventDetails with the matching name from the database.
DropEvent(ctx *Context, name string) error
// UpdateEvent updates existing event stored in the database with the given EventDetails with the updates.
// The original name | // handle execution logic for events. Integrators only need to store and retrieve EventDetails.
type EventDatabase interface {
Database
// GetEvent returns the desired EventDetails and if it exists in the database. | random_line_split |
funcs.py | (slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: variable to which the axis is to insert.
<newaxis>: axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1 | addExtraAxis | identifier_name | |
funcs.py | return quantiles
#-------Copies selected attributes from source object to dict--
def attribute_obj2dict(source_object,dictionary=None,verbose=False):
'''Copies selected attributes from source object to dict
to <dictionary>.
<source_object>: object from which attributes are copied.
<dictionary>: None or dict. If None, create a new dict to store
the result. If a dict, use attributes from <source_object>
to overwrite or fill the dict.
Update time: 2016-01-18 11:00:55.
'''
if dictionary is None:
dictionary={}
#------------------Attribute list------------------
att_list=['name','id','dataset','source','title','long_name','standard_name',\
'units','syno','end','harms','filename','comments','description']
#-----------------Copy attributes-----------------
for att in att_list:
if hasattr(source_object,att):
dictionary[att]=getattr(source_object,att).strip()
if verbose:
print('\n# <attribute_obj2dict>: %s: %s' %(att, dictionary[att]))
return dictionary
#-------------Copy attributes from dict to target object----------
def attribute_dict2obj(dictionary,target_object,verbose=False):
'''Copies attributes from dictionary to target object.
<dictionary>: dict, contains attributes to copy.
<target_object>: obj, attributes are copied to.
Return <target_object>: target object with new attributes.
Update time: 2016-01-18 11:31:25.
'''
for att in dictionary.keys():
setattr(target_object,att,dictionary[att])
if verbose:
print('\n# <attribute_dict2obj>: Copy attribute: %s = %s' %(att,dictionary[att]))
return target_object
#-------------------Add an extra axis to a data slab -------------
def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):
"""Adds an extra axis to a data slab.
<slab>: variable to which the axis is to insert.
<newaxis>: axis object, could be of any length. If None, create a dummy
singleton axis.
<axis>: index of axis to be inserted, e.g. 0 if <newaxis> is inserted
as the 1st dimension.
Return: <slab2>.
Update time: 2013-10-09 12:34:32.
"""
import cdms2 as cdms
import MV2 as MV
if newaxis is None:
newaxis=cdms.createAxis([1,])
newaxis.units=''
# add new axis to axis list of input <slab>
axislist=slab.getAxisList()
axislist.insert(axis,newaxis)
#----------------Reshape----------------
shape=list(slab.shape)
shape.insert(axis,len(newaxis))
slab2=MV.reshape(slab,shape)
#------------Create variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+ | print('# <getQuantiles>: %0.3f left quantile: %f. %0.3f right quantile: %f.'\
%(pii,ql[ii],1-pii,qr[ii]))
| random_line_split | |
funcs.py | variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
'''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
|
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1-lon2)
numerator=(cos(lat2)*sin(dlon))**2 + \
(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon))**2
numerator=np.sqrt(numerator)
denominator=sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(dlon)
dsigma=np.arctan2(numerator,denominator)
arc=r*dsigma
if verbose:
print('\n# <greatCircle>: <dsigma>:',dsigma)
print('# <greatCircle>: <arc>:', arc)
return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var,index1=-1,index2=-2,verbose=True):
'''Get a slab from a variable
<var>: nd array with dimension >=2.
<index1>,<index2>: str, indices denoting the dimensions from which a slab is to slice.
Return <slab>: the (1st) slab from <var>.
E.g. <var> has dimension (12,1,241,4 | return axis | conditional_block |
funcs.py | variable------------
att_dict=attribute_obj2dict(slab)
slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\
typecode='f')
slab2.id=slab.id
if verbose:
print('\n# <addExtraAxis>: Originial variable shape:',slab.shape)
print('# <addExtraAxis>: New variable shape:',slab2.shape)
return slab2
#-------------Concatenate transient variables---------------------
def cat(var1,var2,axis=0,verbose=False):
| try:
attdict=attribute_obj2dict(var1)
hasatt=True
except:
hasatt=False
if not hasattr(var1.getAxis(axis),'units'):
ax=var1.getAxis(axis)
ax.units=''
var1.setAxis(axis,ax)
if not hasattr(var2.getAxis(axis),'units'):
ax=var2.getAxis(axis)
ax.units=''
var2.setAxis(axis,ax)
if verbose:
print('# <cat>: Original order:',order)
if axis!=0:
#----Switch order------
order[axis]=0
order[0]=axis
if verbose:
print('# <cat>: New order:',order)
var1=var1(order=order)
var2=var2(order=order)
result=MV.concatenate((var1,var2))
#result=numpy.concatenate((var1,var2),axis=0)
#NOTE: There seems to be some problems with MV.concatenate() when axis
# is not 0, but can not remember what the problem is. That is why this function
# is written.
# And also some issues regards to the re-ordering and MV.concatenate()
# method defined here. When I concatenated something along the 2nd
# axis and do a MV.std(var,axis=2) (and numpy.std(), an attributeError was raised.
# But other times it works ok. Maybe because of some attributes of my
# variable is gone when putting into MV.std(). No idea why.
# That problem was solved by replacing MV.concatenate() with numpy.concatenate().
# But this will cause the output to be numpy.ndarray rather than MV.transientVariable.
# So be aware that this function may cause some errors if inputs <var1>,<var2>
# are numpy.ndarray.
#-------Switch back----------
result=result(order=order)
else:
result=MV.concatenate((var1,var2))
if hasatt:
result=attribute_dict2obj(attdict,result)
return result
#------Interpret and convert an axis id to index----------
def interpretAxis(axis,ref_var,verbose=True):
'''Interpret and convert an axis id to index
<axis>: axis option, integer or string.
<ref_var>: reference variable.
Return <axis_index>: the index of required axis in <ref_var>.
E.g. index=interpretAxis('time',ref_var)
index=0
index=interpretAxis(1,ref_var)
index=1
Update time: 2013-09-23 13:36:53.
'''
import sys
import numpy
if isinstance(axis,(int,numpy.integer)):
return axis
# interpret string dimension
#elif type(axis)==type('t'):
elif isinstance(axis,str if sys.version_info[0]>=3 else basestring):
axis=axis.lower()
if axis in ['time', 'tim', 't']:
dim_id = 'time'
elif axis in ['level', 'lev','z']:
dim_id = 'level'
elif axis in ['latitude', 'lat','y']:
dim_id = 'latitude'
elif axis in ['longitude', 'long', 'lon','x']:
dim_id = 'longitude'
else:
dim_id = axis
dim_index = ref_var.getAxisIndex(dim_id)
if dim_index==-1:
raise Exception("Required dimension not in <var>.")
return dim_index
else:
raise Exception("<axis> type not recognized.")
#----------Check exsitance of files in file list-----------
def checkFiles(file_list,verbose=True):
'''Check existance of files in a list.
<file_list>: a list of ABSOLUTE paths to be checked;
Usefull before a long list of iteration to make sure every data
file are ready on the disk.
Function prompts enquiry if any file is missing in the list.
'''
import os
import sys
if sys.version_info.major==3:
from builtins import input as input # py2 py3 compatible
else:
input=raw_input
for fileii in file_list:
if os.path.exists(fileii)==False:
print('# <checkFiles>: File not found.',fileii)
input("Press Enter to continue...")
return
#----Get mask for missing data (masked or nan)----
def getMissingMask(slab):
'''Get a bindary denoting missing (masked or nan).
<slab>: nd array, possibly contains masked values or nans.
Return <mask>: nd bindary, 1s for missing, 0s otherwise.
'''
import numpy
nan_mask=numpy.where(numpy.isnan(slab),1,0)
if not hasattr(slab,'mask'):
mask_mask=numpy.zeros(slab.shape)
else:
if slab.mask.size==1 and slab.mask==False:
mask_mask=numpy.zeros(slab.shape)
else:
mask_mask=numpy.where(slab.mask,1,0)
mask=numpy.where(mask_mask+nan_mask>0,1,0)
return mask
#-------Retrieve required axis from variable-------
def getAxis(axis,ref_var,verbose=True):
dim_idx=interpretAxis(axis,ref_var)
try:
ax=ref_var.getAxis(dim_idx)
except:
raise Exception("<axis> %s not found in variable." %str(axis))
if ax is None:
raise Exception("<axis> %s not found in variable." %str(axis))
return ax
def greatCircle(lat1,lon1,lat2,lon2,r=None,verbose=False):
'''Compute the great circle distance on a sphere
<lat1>, <lat2>: scalar float or nd-array, latitudes in degree for
location 1 and 2.
<lon1>, <lon2>: scalar float or nd-array, longitudes in degree for
location 1 and 2.
<r>: scalar float, spherical radius.
Return <arc>: great circle distance on sphere.
<arc> is computed by:
arc = r * dsigma
dsigma = arctan( sqrt(A) / B)
A = (cos(<lat2>) * sin(<dlon>))^2 +
(cos(<lat1>) * sin(<lat2>) - sin(<lat1>) * cos(<lat2>) * cos(<don>))^2
B = sin(<lat1>) * sin(<lat2>) + cos(<lat1>) * cos(<lat2>) * cos(<dlon>)
dlon = abs(lon1 - lon2)
For details see wiki page:
http://en.wikipedia.org/wiki/Great-circle_distance
Update time: 2014-08-11 20:02:05.
'''
import numpy as np
from numpy import sin, cos
if r is None:
r=6371000. #m
d2r=lambda x:x*np.pi/180
lat1,lon1,lat2,lon2=map(d2r,[lat1,lon1,lat2,lon2])
dlon=abs(lon1-lon2)
numerator=(cos(lat2)*sin(dlon))**2 + \
(cos(lat1)*sin(lat2) - sin(lat1)*cos(lat2)*cos(dlon))**2
numerator=np.sqrt(numerator)
denominator=sin(lat1)*sin(lat2)+cos(lat1)*cos(lat2)*cos(dlon)
dsigma=np.arctan2(numerator,denominator)
arc=r*dsigma
if verbose:
print('\n# <greatCircle>: <dsigma>:',dsigma)
print('# <greatCircle>: <arc>:', arc)
return arc
#----------------------Get a slab from a variable----------------------
def getSlab(var,index1=-1,index2=-2,verbose=True):
'''Get a slab from a variable
<var>: nd array with dimension >=2.
<index1>,<index2>: str, indices denoting the dimensions from which a slab is to slice.
Return <slab>: the (1st) slab from <var>.
E.g. <var> has dimension (12,1,241,4 | '''Concatenate 2 variables along axis.
<var1>,<var2>: Variables to be concatenated, in the order of \
<var1>, <var2>;
<axis>: int, index of axis to be concatenated along.
Return <result>
'''
import MV2 as MV
import numpy
try:
order=var1.getAxisListIndex()
except:
order=numpy.arange(var1.ndim) # if var1 is np.ndarray
var1=MV.array(var1)
var2=MV.array(var2)
| identifier_body |
aat_common_test.go | (t *testing.T) {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
// extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
}
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we | TestAATLookup4 | identifier_name | |
aat_common_test.go | // extracted from macos Tamil MN font
src = []byte{0, 4, 0, 6, 0, 5, 0, 24, 0, 2, 0, 6, 0, 151, 0, 129, 0, 42, 0, 156, 0, 153, 0, 88, 0, 163, 0, 163, 0, 96, 1, 48, 1, 48, 0, 98, 255, 255, 255, 255, 0, 100, 0, 4, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 0, 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0, 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0, 5, 0, 6, 0, 7, 0, 8, 0, 9, 0, 32}
class, _, err = ParseAATLookup(src, 0xFFFF)
tu.AssertNoErr(t, err)
gids = []GlyphID{132, 129, 144, 145, 146, 140, 137, 130, 135, 138, 133, 139, 142, 143, 136, 134, 147, 141, 151, 132, 150, 148, 149, 304, 153, 154, 163, 155, 156}
classes = []uint16{
12, 4, 24, 25, 26, 20, 17, 10, 15, 18, 13, 19, 22, 23, 16, 14, 27, 21, 31, 12, 30, 28, 29, 32, 5, 6, 9, 7, 8,
}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found = class.Class(0xFF)
tu.Assert(t, !found)
}
func TestParseTrak(t *testing.T) {
fp := readFontFile(t, "toys/Trak.ttf")
trak, _, err := ParseTrak(readTable(t, fp, "trak"))
tu.AssertNoErr(t, err)
tu.Assert(t, len(trak.Horiz.SizeTable) == 4)
tu.Assert(t, len(trak.Vert.SizeTable) == 0)
tu.Assert(t, reflect.DeepEqual(trak.Horiz.SizeTable, []float32{1, 2, 12, 96}))
tu.Assert(t, reflect.DeepEqual(trak.Horiz.TrackTable[0].PerSizeTracking, []int16{200, 200, 0, -100}))
}
func TestParseFeat(t *testing.T) {
fp := readFontFile(t, "toys/Feat.ttf")
feat, _, err := ParseFeat(readTable(t, fp, "feat"))
tu.AssertNoErr(t, err)
expectedSettings := [...][]FeatureSettingName{
{{2, 260}, {4, 259}, {10, 304}},
{{0, 309}, {1, 263}, {3, 264}},
{{0, 266}, {1, 267}},
{{0, 271}, {2, 272}, {8, 273}},
{{0, 309}, {1, 275}, {2, 277}, {3, 278}},
{{0, 309}, {2, 280}},
{{0, 283}},
{{8, 308}},
{{0, 309}, {3, 289}},
{{0, 294}, {1, 295}, {2, 296}, {3, 297}},
{{0, 309}, {1, 301}},
}
tu.Assert(t, len(feat.Names) == len(expectedSettings))
for i, name := range feat.Names {
exp := expectedSettings[i]
got := name.SettingTable
tu.Assert(t, reflect.DeepEqual(exp, got))
}
}
func TestParseAnkr(t *testing.T) {
table, err := td.Files.ReadFile("toys/tables/ankr.bin")
tu.AssertNoErr(t, err)
ankr, _, err := ParseAnkr(table, 1409)
tu.AssertNoErr(t, err)
_, isFormat4 := ankr.lookupTable.(AATLoopkup4)
tu.Assert(t, isFormat4)
}
func TestParseMorx(t *testing.T) {
files := tu.Filenames(t, "morx")
files = append(files, "toys/Trak.ttf")
for _, filename := range files {
fp := readFontFile(t, filename)
ng := numGlyphs(t, fp)
table, _, err := ParseMorx(readTable(t, fp, "morx"), ng)
tu.AssertNoErr(t, err)
tu.Assert(t, int(table.nChains) == len(table.Chains))
tu.Assert(t, int(table.nChains) == 1)
for _, chain := range table.Chains {
tu.AssertNoErr(t, err)
tu.Assert(t, len(chain.Subtables) == int(chain.nSubtable))
tu.Assert(t, chain.Flags == 1)
}
}
}
func TestMorxLigature(t *testing.T) {
// imported from fonttools
// Taken from “Example 2: A ligature table” in
// https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
// as retrieved on 2017-09-11.
//
// Compared to the example table in Apple’s specification, we’ve
// made the following changes:
//
// * at offsets 0..35, we’ve prepended 36 bytes of boilerplate
// to make the data a structurally valid ‘morx’ table;
//
// * at offsets 88..91 (offsets 52..55 in Apple’s document), we’ve
// changed the range of the third segment from 23..24 to 26..28.
// The hexdump values in Apple’s specification are completely wrong;
// the values from the comments would work, but they can be encoded
// more compactly than in the specification example. For round-trip
// testing, we omit the ‘f’ glyph, which makes A | {
// adapted from fontttools
src := deHexStr(
"0004 0006 0003 000C 0001 0006 " +
"0002 0001 001E " + // glyph 1..2: mapping at offset 0x1E
"0005 0004 001E " + // glyph 4..5: mapping at offset 0x1E
"FFFF FFFF FFFF " + // end of search table
"0007 0008")
class, _, err := ParseAATLookup(src, 4)
tu.AssertNoErr(t, err)
gids := []GlyphID{1, 2, 4, 5}
classes := []uint16{7, 8, 7, 8}
for i, gid := range gids {
c, ok := class.Class(gid)
tu.Assert(t, ok)
tu.Assert(t, c == classes[i])
}
_, found := class.Class(0xFF)
tu.Assert(t, !found)
| identifier_body | |
aat_common_test.go | " + // 44: STXHeader.StateArrayOffset=74 (+36=110)
"0000 006E " + // 48: STXHeader.EntryTableOffset=110 (+36=146)
"0000 0086 " + // 52: STXHeader.InsertionActionOffset=134 (+36=170)
// Glyph class table.
"0002 0006 " + // 56: ClassTable.LookupFormat=2, .UnitSize=6
"0006 0018 " + // 60: .NUnits=6, .SearchRange=24
"0002 000C " + // 64: .EntrySelector=2, .RangeShift=12
"00AC 00AC 0005 " + // 68: GlyphID 172..172 -> GlyphClass 5
"01EB 01E6 0005 " + // 74: GlyphID 486..491 -> GlyphClass 5
"01F0 01F0 0004 " + // 80: GlyphID 496..496 -> GlyphClass 4
"01F8 01F6 0004 " + // 88: GlyphID 502..504 -> GlyphClass 4
"01FC 01FA 0004 " + // 92: GlyphID 506..508 -> GlyphClass 4
"0250 0250 0005 " + // 98: GlyphID 592..592 -> GlyphClass 5
"FFFF FFFF 0000 " + // 104: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 " + // 110: State[0][0..5]
"0000 0000 0000 0000 0001 0000 " + // 122: State[1][0..5]
"0000 0000 0001 0000 0001 0002 " + // 134: State[2][0..5]
// Entry table.
"0000 0000 " + // 146: Entries[0].NewState=0, .Flags=0
"FFFF " + // 150: Entries[0].CurrentInsertIndex=<None>
"FFFF " + // 152: Entries[0].MarkedInsertIndex=<None>
"0002 0000 " + // 154: Entries[1].NewState=0, .Flags=0
"FFFF " + // 158: Entries[1].CurrentInsertIndex=<None>
"FFFF " + // 160: Entries[1].MarkedInsertIndex=<None>
"0000 " + // 162: Entries[2].NewState=0
"2820 " + // 164: .Flags=CurrentIsKashidaLike,CurrentInsertBefore
// .CurrentInsertCount=1, .MarkedInsertCount=0
"0000 " + // 166: Entries[1].CurrentInsertIndex=0
"FFFF " + // 168: Entries[1].MarkedInsertIndex=<None>
// Insertion action table.
"022F") // 170: InsertionActionTable[0]=GlyphID 559
tu.Assert(t, len(morxInsertionData) == 172)
out, _, err := ParseMorx(morxInsertionData, 910)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
insert, ok := subtable.Data.(MorxSubtableInsertion)
tu.Assert(t, ok)
machine := insert.AATStateTableExt
tu.Assert(t, machine.StateSize == 6)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 172, LastGlyph: 172, Value: 5},
{FirstGlyph: 486, LastGlyph: 491, Value: 5},
{FirstGlyph: 496, LastGlyph: 496, Value: 4},
{FirstGlyph: 502, LastGlyph: 504, Value: 4},
{FirstGlyph: 506, LastGlyph: 508, Value: 4},
{FirstGlyph: 592, LastGlyph: 592, Value: 5},
}
tu.Assert(t, reflect.DeepEqual(class.Records, expMachineClassRecords))
expMachineStates := [][]uint16{
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 110: State[0][0..5]
{0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000}, // 122: State[1][0..5]
{0x0000, 0x0000, 0x0001, 0x0000, 0x0001, 0x0002}, // 134: State[2][0..5]
}
tu.Assert(t, reflect.DeepEqual(machine.States, expMachineStates))
expMachineEntries := []AATStateEntry{
{NewState: 0, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0x0002, Flags: 0, data: [4]byte{0xff, 0xff, 0xff, 0xff}},
{NewState: 0, Flags: 0x2820, data: [4]byte{0, 0, 0xff, 0xff}},
}
tu.Assert(t, reflect.DeepEqual(machine.Entries, expMachineEntries))
tu.Assert(t, reflect.DeepEqual(insert.Insertions, []GlyphID{0x022f}))
}
func TestParseKerx(t *testing.T) {
for _, filepath := range []string{
"toys/tables/kerx0.bin",
"toys/tables/kerx2.bin",
"toys/tables/kerx2bis.bin",
"toys/tables/kerx24.bin",
"toys/tables/kerx4-1.bin",
"toys/tables/kerx4-2.bin",
"toys/tables/kerx6Exp-VF.bin",
"toys/tables/kerx6-VF.bin",
} {
table, err := td.Files.Rea | dFile(filepath)
tu.AssertNoErr(t, err)
kerx, _, err := ParseKerx(table, 0xFF)
tu.AssertNoErr(t, err)
tu.Assert(t, len(kerx.Tables) > 0)
for _, subtable := range kerx.Tables {
tu.Assert(t, subtable.TupleCount > 0 == strings.Contains(filepath, "VF"))
switch data := subtable.Data.(type) {
case KerxData0:
tu.Assert(t, len(data.Pairs) > 0)
case KerxData2:
tu.Assert(t, data.Left != nil)
tu.Assert(t, data.Right != nil)
tu.Assert(t, int(data.KerningStart) <= len(data.KerningData))
case KerxData4:
tu.Assert(t, data.Anchors != nil)
}
} | conditional_block | |
aat_common_test.go | morxLigatureData := deHexStr(
"0002 0000 " + // 0: Version=2, Reserved=0
"0000 0001 " + // 4: MorphChainCount=1
"0000 0001 " + // 8: DefaultFlags=1
"0000 00DA " + // 12: StructLength=218 (+8=226)
"0000 0000 " + // 16: MorphFeatureCount=0
"0000 0001 " + // 20: MorphSubtableCount=1
"0000 00CA " + // 24: Subtable[0].StructLength=202 (+24=226)
"80 " + // 28: Subtable[0].CoverageFlags=0x80
"00 00 " + // 29: Subtable[0].Reserved=0
"02 " + // 31: Subtable[0].MorphType=2/LigatureMorph
"0000 0001 " + // 32: Subtable[0].SubFeatureFlags=0x1
// State table header.
"0000 0007 " + // 36: STXHeader.ClassCount=7
"0000 001C " + // 40: STXHeader.ClassTableOffset=28 (+36=64)
"0000 0040 " + // 44: STXHeader.StateArrayOffset=64 (+36=100)
"0000 0078 " + // 48: STXHeader.EntryTableOffset=120 (+36=156)
"0000 0090 " + // 52: STXHeader.LigActionsOffset=144 (+36=180)
"0000 009C " + // 56: STXHeader.LigComponentsOffset=156 (+36=192)
"0000 00AE " + // 60: STXHeader.LigListOffset=174 (+36=210)
// Glyph class table.
"0002 0006 " + // 64: ClassTable.LookupFormat=2, .UnitSize=6
"0003 000C " + // 68: .NUnits=3, .SearchRange=12
"0001 0006 " + // 72: .EntrySelector=1, .RangeShift=6
"0016 0014 0004 " + // 76: GlyphID 20..22 [a..c] -> GlyphClass 4
"0018 0017 0005 " + // 82: GlyphID 23..24 [d..e] -> GlyphClass 5
"001C 001A 0006 " + // 88: GlyphID 26..28 [g..i] -> GlyphClass 6
"FFFF FFFF 0000 " + // 94: <end of lookup>
// State array.
"0000 0000 0000 0000 0001 0000 0000 " + // 100: State[0][0..6]
"0000 0000 0000 0000 0001 0000 0000 " + // 114: State[1][0..6]
"0000 0000 0000 0000 0001 0002 0000 " + // 128: State[2][0..6]
"0000 0000 0000 0000 0001 0002 0003 " + // 142: State[3][0..6]
// Entry table.
"0000 0000 " + // 156: Entries[0].NewState=0, .Flags=0
"0000 " + // 160: Entries[0].ActionIndex=<n/a> because no 0x2000 flag
"0002 8000 " + // 162: Entries[1].NewState=2, .Flags=0x8000 (SetComponent)
"0000 " + // 166: Entries[1].ActionIndex=<n/a> because no 0x2000 flag
"0003 8000 " + // 168: Entries[2].NewState=3, .Flags=0x8000 (SetComponent)
"0000 " + // 172: Entries[2].ActionIndex=<n/a> because no 0x2000 flag
"0000 A000 " + // 174: Entries[3].NewState=0, .Flags=0xA000 (SetComponent,Act)
"0000 " + // 178: Entries[3].ActionIndex=0 (start at Action[0])
// Ligature actions table.
"3FFF FFE7 " + // 180: Action[0].Flags=0, .GlyphIndexDelta=-25
"3FFF FFED " + // 184: Action[1].Flags=0, .GlyphIndexDelta=-19
"BFFF FFF2 " + // 188: Action[2].Flags=<end of list>, .GlyphIndexDelta=-14
// Ligature component table.
"0000 0001 " + // 192: LigComponent[0]=0, LigComponent[1]=1
"0002 0003 " + // 196: LigComponent[2]=2, LigComponent[3]=3
"0000 0004 " + // 200: LigComponent[4]=0, LigComponent[5]=4
"0000 0008 " + // 204: LigComponent[6]=0, LigComponent[7]=8
"0010 " + // 208: LigComponent[8]=16
// Ligature list.
"03E8 03E9 " + // 210: LigList[0]=1000, LigList[1]=1001
"03EA 03EB " + // 214: LigList[2]=1002, LigList[3]=1003
"03EC 03ED " + // 218: LigList[4]=1004, LigList[3]=1005
"03EE 03EF ") // 222: LigList[5]=1006, LigList[6]=1007
tu.Assert(t, len(morxLigatureData) == 226)
out, _, err := ParseMorx(morxLigatureData, 1515)
tu.AssertNoErr(t, err)
tu.Assert(t, len(out.Chains) == 1)
chain := out.Chains[0]
tu.Assert(t, chain.Flags == 1)
tu.Assert(t, len(chain.Subtables) == 1)
subtable := chain.Subtables[0]
const vertical uint8 = 0x80
tu.Assert(t, subtable.Coverage == vertical)
tu.Assert(t, subtable.SubFeatureFlags == 1)
lig, ok := subtable.Data.(MorxSubtableLigature)
tu.Assert(t, ok)
machine := lig.AATStateTableExt
tu.Assert(t, machine.StateSize == 7)
class, ok := machine.Class.(AATLoopkup2)
tu.Assert(t, ok)
expMachineClassRecords := []LookupRecord2{
{FirstGlyph: 20, LastGlyph: 22, Value: 4},
{FirstGlyph: 23, LastGlyph: 24, Value: 5}, | random_line_split | ||
args.go | p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords parses the positional arguments in args and the
// keyword arguments in kwargs according to the CPython-style format string
// documented at the top of this file, storing each accepted value through
// the corresponding pointer in results.
//
// kwlist supplies the parameter name for each format unit; it is used both
// to look up keyword arguments and to build error messages. It may be nil
// for purely positional parsing, but when non-nil it must contain exactly
// one entry per result. Values stored into results are the argument
// objects themselves, except for the 'd' unit which converts an Int into a
// new Float.
//
// It returns nil on success, or a TypeError for a bad argument count, an
// unexpected or duplicated keyword, a positional value for a keyword-only
// parameter, or a value of the wrong type.
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
	if kwlist != nil && len(results) != len(kwlist) {
		return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
	}
	// Decode the format string into one formatOp per expected argument.
	// min is the count of mandatory arguments (units before '|'), name is
	// the function name (after ':'), and kwOnly_i is the index of the
	// first keyword-only unit (after '$').
	var opsBuf [16]formatOp
	min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
	err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
	if err != nil {
		return err
	}
	// Check all the kwargs are in kwlist.
	// O(N^2) slow, but kwlist is usually short.
	for kwargName := range kwargs {
		for _, kw := range kwlist {
			if kw == kwargName {
				goto found
			}
		}
		return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
	found: // labeled empty statement: continue the outer range loop
	}
	// Walk through all the results we want, pairing each format unit with
	// either a positional or a keyword argument.
	for i, op := range ops {
		var (
			arg Object // value supplied for this parameter (nil if absent)
			kw  string // this parameter's keyword name (empty if none)
		)
		if i < len(kwlist) {
			kw = kwlist[i]
			arg = kwargs[kw]
		}
		// Consume ordered args first -- they should not require keyword only or also be specified via keyword
		if i < len(args) {
			if i >= kwOnly_i {
				return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
			}
			if arg != nil {
				return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
			}
			arg = args[i]
		}
		// Unspecified args retain their default value
		if arg == nil {
			continue
		}
		result := results[i]
		// Type-check/convert arg according to the unit's code and, for the
		// string-ish units, its '#'/'*' modifier.
		switch op.code {
		case 'O': // any object, stored as-is
			*result = arg
		case 'Z': // str or None; only 'Z' and 'Z#' forms are supported
			switch op.modifier {
			default:
				return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
			case '#', 0:
				switch arg := arg.(type) {
				case String, NoneType:
				default:
					return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
				}
			}
			*result = arg
		case 'z': // str or None; 'z#'/'z*' additionally accept bytes
			switch op.modifier {
			default:
				switch arg := arg.(type) {
				case String, NoneType:
					// ok
				default:
					return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
				}
			case '#':
				fallthrough // FIXME(sbinet): check for read-only?
			case '*':
				switch arg := arg.(type) {
				case String, Bytes, NoneType:
					// ok.
				default:
					return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
				}
			}
			*result = arg
		case 'U': // str object, no conversion attempted
			if _, ok := arg.(String); !ok {
				return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
			}
			*result = arg
		case 's': // str; 's#'/'s*' additionally accept bytes
			switch op.modifier {
			default:
				if _, ok := arg.(String); !ok {
					return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
				}
			case '#':
				fallthrough // FIXME(sbinet): check for read-only?
			case '*':
				switch arg := arg.(type) {
				case String, Bytes:
					// ok.
				default:
					return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
				}
			}
			*result = arg
		case 'y': // bytes only, whatever the modifier
			switch op.modifier {
			default:
				if _, ok := arg.(Bytes); !ok {
					return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
				}
			case '#':
				fallthrough // FIXME(sbinet): check for read-only?
			case '*':
				switch arg := arg.(type) {
				case Bytes:
					// ok.
				default:
					return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
				}
			}
			*result = arg
		case 'i', 'n': // Int only; no coercion from other numeric types
			if _, ok := arg.(Int); !ok {
				return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
			}
			*result = arg
		case 'p': // must already be a Bool; no general truth-testing here
			if _, ok := arg.(Bool); !ok {
				return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
			}
			*result = arg
		case 'd': // Float; a plain Int is converted to Float
			switch x := arg.(type) {
			case Int:
				*result = Float(x)
			case Float:
				*result = x
			default:
				return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
			}
		default:
			return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
		}
	}
	return nil
}
// Parse tuple only
func ParseTuple(args Tuple, format | string, re | identifier_name | |
args.go | // This format converts a bytes-like object to a C pointer to a
// character string; it does not accept Unicode objects. The bytes
// buffer must not contain embedded NUL bytes; if it does, a TypeError
// exception is raised.
//
// y* (bytes, bytearray or bytes-like object) [Py_buffer]
//
// This variant on s* doesn’t accept Unicode objects, only bytes-like
// objects. This is the recommended way to accept binary data.
//
// y# (bytes) [const char *, int]
//
// This variant on s# doesn’t accept Unicode objects, only bytes-like
// objects.
//
// S (bytes) [PyBytesObject *]
//
// Requires that the Python object is a bytes object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytes object. The C variable may also be declared as PyObject*.
//
// Y (bytearray) [PyByteArrayObject *]
//
// Requires that the Python object is a bytearray object, without
// attempting any conversion. Raises TypeError if the object is not a
// bytearray object. The C variable may also be declared as PyObject*.
//
// u (str) [Py_UNICODE *]
//
// Convert a Python Unicode object to a C pointer to a NUL-terminated
// buffer of Unicode characters. You must pass the address of a
// Py_UNICODE pointer variable, which will be filled with the pointer
// to an existing Unicode buffer. Please note that the width of a
// Py_UNICODE character depends on compilation options (it is either
// 16 or 32 bits). The Python string must not contain embedded NUL
// characters; if it does, a TypeError exception is raised.
//
// Note Since u doesn’t give you back the length of the string, and it
// may contain embedded NUL characters, it is recommended to use u# or
// U instead.
//
// u# (str) [Py_UNICODE *, int]
//
// This variant on u stores into two C variables, the first one a
// pointer to a Unicode data buffer, the second one its length.
//
// Z (str or None) [Py_UNICODE *]
//
// Like u, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// Z# (str or None) [Py_UNICODE *, int]
//
// Like u#, but the Python object may also be None, in which case the
// Py_UNICODE pointer is set to NULL.
//
// U (str) [PyObject *]
//
// Requires that the Python object is a Unicode object, without
// attempting any conversion. Raises TypeError if the object is not a
// Unicode object. The C variable may also be declared as PyObject*.
//
// w* (bytearray or read-write byte-oriented buffer) [Py_buffer]
//
// This format accepts any object which implements the read-write
// buffer interface. It fills a Py_buffer structure provided by the
// caller. The buffer may contain embedded null bytes. The caller have
// to call PyBuffer_Release() when it is done with the buffer.
//
// es (str) [const char *encoding, char **buffer]
//
// This variant on s is used for encoding Unicode into a character
// buffer. It only works for encoded data without embedded NUL bytes.
//
// This format requires two arguments. The first is only used as
// input, and must be a const char* which points to the name of an
// encoding as a NUL-terminated string, or NULL, in which case 'utf-8'
// encoding is used. An exception is raised if the named encoding is
// not known to Python. The second argument must be a char**; the
// value of the pointer it references will be set to a buffer with the
// contents of the argument text. The text will be encoded in the
// encoding specified by the first argument.
//
// PyArg_ParseTuple() will allocate a buffer of the needed size, copy
// the encoded data into this buffer and adjust *buffer to reference
// the newly allocated storage. The caller is responsible for calling
// PyMem_Free() to free the allocated buffer after use.
//
// et (str, bytes or bytearray) [const char *encoding, char **buffer]
//
// Same as es except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// es# (str) [const char *encoding, char **buffer, int *buffer_length]
//
// This variant on s# is used for encoding Unicode into a character
// buffer. Unlike the es format, this variant allows input data which
// contains NUL characters.
//
// It requires three arguments. The first is only used as input, and
// must be a const char* which points to the name of an encoding as a
// NUL-terminated string, or NULL, in which case 'utf-8' encoding is
// used. An exception is raised if the named encoding is not known to
// Python. The second argument must be a char**; the value of the
// pointer it references will be set to a buffer with the contents of
// the argument text. The text will be encoded in the encoding
// specified by the first argument. The third argument must be a
// pointer to an integer; the referenced integer will be set to the
// number of bytes in the output buffer.
//
// There are two modes of operation:
//
// If *buffer points a NULL pointer, the function will allocate a
// buffer of the needed size, copy the encoded data into this buffer
// and set *buffer to reference the newly allocated storage. The
// caller is responsible for calling PyMem_Free() to free the
// allocated buffer after usage.
//
// If *buffer points to a non-NULL pointer (an already allocated
// buffer), PyArg_ParseTuple() will use this location as the buffer
// and interpret the initial value of *buffer_length as the buffer
// size. It will then copy the encoded data into the buffer and
// NUL-terminate it. If the buffer is not large enough, a ValueError
// will be set.
//
// In both cases, *buffer_length is set to the length of the encoded
// data without the trailing NUL byte.
//
// et# (str, bytes or bytearray) [const char *encoding, char **buffer,
// int *buffer_length]
//
// Same as es# except that byte string objects are passed through
// without recoding them. Instead, the implementation assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// | random_line_split | ||
args.go | assumes that the
// byte string object uses the encoding passed in as parameter.
//
// Numbers
//
// b (int) [unsigned char]
//
// Convert a nonnegative Python integer to an unsigned tiny int,
// stored in a C unsigned char.
//
// B (int) [unsigned char]
//
// Convert a Python integer to a tiny int without overflow checking,
// stored in a C unsigned char. h (int) [short int]
//
// Convert a Python integer to a C short int.
//
// H (int) [unsigned short int]
//
// Convert a Python integer to a C unsigned short int, without
// overflow checking.
//
// i (int) [int]
//
// Convert a Python integer to a plain C int.
//
// I (int) [unsigned int]
//
// Convert a Python integer to a C unsigned int, without overflow
// checking.
//
// l (int) [long int]
//
// Convert a Python integer to a C long int.
//
// k (int) [unsigned long]
//
// Convert a Python integer to a C unsigned long without overflow
// checking.
//
// L (int) [PY_LONG_LONG]
//
// Convert a Python integer to a C long long. This format is only
// available on platforms that support long long (or _int64 on
// Windows).
//
// K (int) [unsigned PY_LONG_LONG]
//
// Convert a Python integer to a C unsigned long long without overflow
// checking. This format is only available on platforms that support
// unsigned long long (or unsigned _int64 on Windows).
//
// n (int) [Py_ssize_t]
//
// Convert a Python integer to a C Py_ssize_t.
//
// c (bytes or bytearray of length 1) [char]
//
// Convert a Python byte, represented as a bytes or bytearray object
// of length 1, to a C char.
//
// Changed in version 3.3: Allow bytearray objects.
//
// C (str of length 1) [int]
//
// Convert a Python character, represented as a str object of length 1, to a C int.
//
// f (float) [float]
//
// Convert a Python floating point number to a C float.
//
// d (float) [double]
//
// Convert a Python floating point number to a C double.
//
// D (complex) [Py_complex]
//
// Convert a Python complex number to a C Py_complex structure.
//
// Other objects
//
// O (object) [PyObject *]
//
// Store a Python object (without any conversion) in a C object
// pointer. The C program thus receives the actual object that was
// passed. The object’s reference count is not increased. The pointer
// stored is not NULL.
//
// O! (object) [typeobject, PyObject *]
//
// Store a Python object in a C object pointer. This is similar to O,
// but takes two C arguments: the first is the address of a Python
// type object, the second is the address of the C variable (of type
// PyObject*) into which the object pointer is stored. If the Python
// object does not have the required type, TypeError is raised.
//
// O& (object) [converter, anything]
//
// Convert a Python object to a C variable through a converter
// function. This takes two arguments: the first is a function, the
// second is the address of a C variable (of arbitrary type),
// converted to void *. The converter function in turn is called as
// follows:
//
// status = converter(object, address);
//
// where object is the Python object to be converted and address is
// the void* argument that was passed to the PyArg_Parse*()
// function. The returned status should be 1 for a successful
// conversion and 0 if the conversion has failed. When the conversion
// fails, the converter function should raise an exception and leave
// the content of address unmodified.
//
// If the converter returns Py_CLEANUP_SUPPORTED, it may get called a
// second time if the argument parsing eventually fails, giving the
// converter a chance to release any memory that it had already
// allocated. In this second call, the object parameter will be NULL;
// address will have the same value as in the original call.
//
// Changed in version 3.1: Py_CLEANUP_SUPPORTED was added.
//
// p (bool) [int]
//
// Tests the value passed in for truth (a boolean predicate) and
// converts the result to its equivalent C true/false integer
// value. Sets the int to 1 if the expression was true and 0 if it was
// false. This accepts any valid Python value. See Truth Value Testing
// for more information about how Python tests values for truth.
//
// New in version 3.3.
//
// (items) (tuple) [matching-items]
//
// The object must be a Python sequence whose length is the number of
// format units in items. The C arguments must correspond to the
// individual format units in items. Format units for sequences may be
// nested.
//
// It is possible to pass “long” integers (integers whose value
// exceeds the platform’s LONG_MAX) however no proper range checking
// is done — the most significant bits are silently truncated when the
// receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := | results[i]
swi | conditional_block | |
args.go | receiving field is too small to receive the value (actually, the
// semantics are inherited from downcasts in C — your mileage may
// vary).
//
// A few other characters have a meaning in a format string. These may
// not occur inside nested parentheses. They are:
//
// |
//
// Indicates that the remaining arguments in the Python argument list
// are optional. The C variables corresponding to optional arguments
// should be initialized to their default value — when an optional
// argument is not specified, PyArg_ParseTuple() does not touch the
// contents of the corresponding C variable(s).
//
// $
//
// PyArg_ParseTupleAndKeywords() only: Indicates that the remaining
// arguments in the Python argument list are keyword-only.
//
// New in version 3.3.
//
// :
//
// The list of format units ends here; the string after the colon is
// used as the function name in error messages (the “associated value”
// of the exception that PyArg_ParseTuple() raises).
//
// ;
//
// The list of format units ends here; the string after the semicolon
// is used as the error message instead of the default error
// message. : and ; mutually exclude each other.
//
// Note that any Python object references which are provided to the
// caller are borrowed references; do not decrement their reference
// count!
//
// Additional arguments passed to these functions must be addresses of
// variables whose type is determined by the format string; these are
// used to store values from the input tuple. There are a few cases,
// as described in the list of format units above, where these
// parameters are used as input values; they should match what is
// specified for the corresponding format unit in that case.
//
// For the conversion to succeed, the arg object must match the format
// and the format must be exhausted. On success, the PyArg_Parse*()
// functions return true, otherwise they return false and raise an
// appropriate exception. When the PyArg_Parse*() functions fail due
// to conversion failure in one of the format units, the variables at
// the addresses corresponding to that and the following format units
// are left untouched.
package py
// FIXME this would be a lot more useful if we could supply the
// address of a String rather than an Object - would then need
// introspection to set it properly
// ParseTupleAndKeywords
func ParseTupleAndKeywords(args Tuple, kwargs StringDict, format string, kwlist []string, results ...*Object) error {
if kwlist != nil && len(results) != len(kwlist) {
return ExceptionNewf(TypeError, "Internal error: supply the same number of results and kwlist")
}
var opsBuf [16]formatOp
min, name, kwOnly_i, ops := parseFormat(format, opsBuf[:0])
err := checkNumberOfArgs(name, len(args)+len(kwargs), len(results), min, len(ops))
if err != nil {
return err
}
// Check all the kwargs are in kwlist
// O(N^2) Slow but kwlist is usually short
for kwargName := range kwargs {
for _, kw := range kwlist {
if kw == kwargName {
goto found
}
}
return ExceptionNewf(TypeError, "%s() got an unexpected keyword argument '%s'", name, kwargName)
found:
}
// Walk through all the results we want
for i, op := range ops {
var (
arg Object
kw string
)
if i < len(kwlist) {
kw = kwlist[i]
arg = kwargs[kw]
}
// Consume ordered args first -- they should not require keyword only or also be specified via keyword
if i < len(args) {
if i >= kwOnly_i {
return ExceptionNewf(TypeError, "%s() specifies argument '%s' that is keyword only", name, kw)
}
if arg != nil {
return ExceptionNewf(TypeError, "%s() got multiple values for argument '%s'", name, kw)
}
arg = args[i]
}
// Unspecified args retain their default value
if arg == nil {
continue
}
result := results[i]
switch op.code {
case 'O':
*result = arg
case 'Z':
switch op.modifier {
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
case '#', 0:
switch arg := arg.(type) {
case String, NoneType:
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'z':
switch op.modifier {
default:
switch arg := arg.(type) {
case String, NoneType:
// ok
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or None, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes, NoneType:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str, bytes-like or None, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'U':
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 's':
switch op.modifier {
default:
if _, ok := arg.(String); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be str, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case String, Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be str or bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'y':
switch op.modifier {
default:
if _, ok := arg.(Bytes); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
case '#':
fallthrough // FIXME(sbinet): check for read-only?
case '*':
switch arg := arg.(type) {
case Bytes:
// ok.
default:
return ExceptionNewf(TypeError, "%s() argument %d must be bytes-like, not %s", name, i+1, arg.Type().Name)
}
}
*result = arg
case 'i', 'n':
if _, ok := arg.(Int); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be int, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'p':
if _, ok := arg.(Bool); !ok {
return ExceptionNewf(TypeError, "%s() argument %d must be bool, not %s", name, i+1, arg.Type().Name)
}
*result = arg
case 'd':
switch x := arg.(type) {
case Int:
*result = Float(x)
case Float:
*result = x
default:
return ExceptionNewf(TypeError, "%s() argument %d must be float, not %s", name, i+1, arg.Type().Name)
}
default:
return ExceptionNewf(TypeError, "Unknown/Unimplemented format character %q in ParseTupleAndKeywords called from %s", op, name)
}
}
return nil
}
// Parse tuple only
func ParseTuple(args Tuple, format string, results ...*Object) error {
return ParseTupleAndKeywords(args, nil, format, nil, results...)
}
type formatOp struct {
code byte
modifier byte
}
// Parse the format
func parseFormat(format string, in []formatOp) (min int, name string, kwOnly_i int, ops []formatOp) {
name = "function"
min = -1 |
kwOnly_i = 0xFFFF
ops = in[:0]
N := len(format)
for i := 0; i < N; {
op := formatOp{code: format[i]}
i++
if i < N {
if mod := format[i]; mod == '*' || mod == '#' {
op.modifier = mod
i++
}
}
switch op.code {
case ':', ';':
name = format[i:]
i = N
case '$':
kwOnly_i = len(ops) | identifier_body | |
sudo.py | import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initi | eight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) | alize w | identifier_name |
sudo.py | 2
import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull | #print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
| extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0]) | random_line_split |
sudo.py | import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if |
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight)
else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand | we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.read()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width)) | identifier_body |
sudo.py | import imutils
import numpy as np
from sklearn.metrics import pairwise
import mss, os
#-----------------------------------------------------------------------------------------------------------------------------------
#global var
background = None
_cnt = 0
#-----------------------------------------------------------------------------------------------------------------------------------
#directory operations
dir_path = os.getcwd()
print(dir_path)
full_path = dir_path + '\screenshots'
if not os.path.exists(full_path):
os.makedirs(full_path)
print(full_path)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
dst(x,y)=(1−alpha).dst(x,y)+alpha.src(x,y)
Parameters for accumulateWeighted():
src – Input image as 1- or 3-channel, 8-bit or 32-bit floating point.
dst – Accumulator image with the same number of channels as input image, 32-bit or 64-bit floating-point.
alpha – Weight of the input image.
mask – Optional operation mask.
The function accumulateWeighted calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence.
alpha regulates the update speed (how fast the accumulator “forgets” about earlier images). The function supports multi-channel images. Each channel is processed independently.
#img - current frame
#avgWeight - threshold to perform running average over images
#accumulateWeighted() - compute running average over background and current frame
'''
def compute_running_average(image, avgWeight):
global background
if background is None:
background = image.copy().astype("float")
return
cv2.accumulateWeighted(image, background, avgWeight)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
#Threshold Logic:
#when x(n) is pixel,
if n >= threshold:
x(n) = 1
else:
x(n) = 0
cv2.contourArea uses green's theorem to find area.
Green's Theorem:
Let 'c' be a positively oriented, piecewise smooth, simple closed curve in a plane, and let d be the region bounded by c. If P and Q are functions of (x, y) defined on an open region containing d and have continuous partial derivatives there, then,
INc(P dx + Q dy) = IN(INd( daba Q / daba x - daba P / daba y)) dx dy
where, IN - integral
#cv2.findContours() --> image, retrievalmode, approximationmethod
#cv2.RETR_EXTERNAL --> retrieves only the extreme outer contours.
#cv2.CHAIN_APPROX_SIMPLE --> compresses horizontal, vertical, and diagonal segments and leaves only their end points. For example, an up-right rectangular contour is encoded with 4 points.
'''
def segmentation(image, threshold=25):
global background
diff = cv2.absdiff(background.astype("uint8"), image) #absolute difference between background and image(current frame)
#print(diff)
thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1] #cv2.threshold() returns two o/p. First is retval and second is threshold image. Hence, we choose second val [1]
#print(thresholded)
(_, cnts, _) = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #Finds contours in a binary image.
#print(cnt)
if len(cnts) == 0:
return
else:
segmented = max(cnts, key = cv2.contourArea)
return (thresholded, segmented)
#-----------------------------------------------------------------------------------------------------------------------------------
'''
Four Intermediate Steps
1. Find the convex hull of the segmented hand region (which is a contour) and compute the most extreme points in the convex hull (Extreme Top, Extreme Bottom, Extreme Left, Extreme Right).
2. Find the center of palm using these extremes points in the convex hull.
3. Using the palm’s center, construct a circle with the maximum Euclidean distance (between the palm’s center and the extreme points) as radius.
4. Perform bitwise AND operation between the thresholded hand image (frame) and the circular ROI (mask). This reveals the finger slices, which could further be used to calcualate the number of fingers shown.
convex_hull of 2d points using Sklansky's Algorithm (OpenCV Doc)
'''
def count_fingers(thresholded, segmented):
#convex hull of segmented region
conver_hull = cv2.convexHull(segmented)
#extremePoints in the convex hull
extreme_top = tuple(convex_hull[convex_hull[:, :, 1].argmin()][0])
extreme_bottom = tuple(convex_hull[convex_hull[:, :, 1].argmax()][0])
extreme_left = tuple(convex_hull[convex_hull[:, :, 0].argmin()][0])
extreme_right = tuple(convex_hull[convex_hull[:, :, 0].argmax()][0])
#print(extreme_top + " " + extreme_bottom + " " + extreme_left + " " + extreme_right)
#palm center
cX = (extreme_left[0] + extreme_right[0]) / 2
cY = (extreme_top[1] + extreme_bottom[1]) / 2
cX = np.round(cX).astype("int") #convert to int
cY = np.round(cY).astype("int")
#maximum euclidean distance between palm center and extremePoints
distance = pairwise.euclidean_distances([(cX, cY)], Y=[extreme_left, extreme_right, extreme_top, extreme_bottom])[0]
maximum_distance = distance[distance.argmax()]
#print(maximum_distance)
#Radius of the circle
radius = int(0.8 * maximum_distance)
#Circumference of the circle
circumference = (2 * np.pi * radius)
#extract circulat roi which has palm and fingers
circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
print(circular_roi)
circulat_roi = np.round(circular_roi).astype("int")
#draw roi
cv2.circle(circular_roi, (cX, cY), radius, 255, 1)
#bit-wise AND between thresholded hand using roi as the mask which gives cuts obtained using mask on the thresholded hand
circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
#computing contours in the circular ROI
(_, cnts, _) = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#finger_cnt
count = 0
for c in cnts:
#compute the box of contour
(x, y, w, h) = cv2.boundingRect(c)
#increment the count of fingers only if -
#1. The contour region is not the wrist (bottom area)
#2. The number of points along the contour does not exceed 25% of the circumference of the circular ROI
if ((cY + (cY * 0.25)) > (y + h)) and ((circumference * 0.25) > c.shape[0]):
count += 1
return count
#-----------------------------------------------------------------------------------------------------------------------------------
def captureScreen(fingers):
global _cnt
with mss.mss() as sct:
filename = sct.shot(mon = -1, output = full_path + '\screenshot_{}.png'.format(str(_cnt)))
print(filename)
_cnt = _cnt + 1
#-----------------------------------------------------------------------------------------------------------------------------------
def compute():
#initialize weight for running average
alphaWeight = 0.5 #if we set lower value, running average will be performed over larger amt of previous frames and vice-a-versa
stream = 'http://192.168.0.4:8080/video'
#get the reference to the webcam
camera = cv2.VideoCapture(stream)
top, right, bottom, left = 10, 350, 225, 590 #ROI Co-ords
num_frames = 0 #initial number of frames
while True:
(_, frame) = camera.re | else:
#segment hand region
hand = segmentation(gray)
if hand is not None:
#unpack thresholded image and segmented region
(thresholded, segmented) = hand
| ad()
frame = imutils.resize(frame, width=700) #resize frame
frame = cv2.flip(frame, 1) #flip around x-axis -- dest(i,j) = src(i,cols-j-1)
clone = frame.copy()
(height, width) = frame.shape[:2] #get height and width of frame
#print(str(height) +" "+ str(width))
roi = frame[top:bottom, right:left] #get roi
#convert to grayscale and blur
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7,7), 0) #(src ,kernel_size(height and width), sigmaX and sigmaY both set to 0)
#https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=gaussianblur#gaussianblur
#to get background, keep computing running average till threshold is reached to caliberate our running average model
if num_frames < 30:
compute_running_average(gray, alphaWeight) | conditional_block |
cloudFoundryDeploy.go | = findMtar()
if err != nil {
return err
}
log.Entry().Debugf("Using mtar file '%s' found in workspace", mtarFilePath)
} else {
exists, err := fileUtils.FileExists(mtarFilePath)
if err != nil {
return errors.Wrapf(err, "Cannot check if file path '%s' exists", mtarFilePath)
}
if !exists {
return fmt.Errorf("mtar file '%s' retrieved from configuration does not exist", mtarFilePath)
}
log.Entry().Debugf("Using mtar file '%s' from configuration", mtarFilePath)
}
return deployMta(config, mtarFilePath, command)
}
type deployConfig struct {
DeployCommand string
DeployOptions []string
AppName string
ManifestFile string
SmokeTestScript []string
}
func handleCFNativeDeployment(config *cloudFoundryDeployOptions, command command.ExecRunner) error {
deployType, err := checkAndUpdateDeployTypeForNotSupportedManifest(config)
if err != nil {
return err
}
var deployCommand string
var smokeTestScript []string
var deployOptions []string
// deploy command will be provided by the prepare functions below
if deployType == "blue-green" {
deployCommand, deployOptions, smokeTestScript, err = prepareBlueGreenCfNativeDeploy(config)
if err != nil {
return errors.Wrapf(err, "Cannot prepare cf native deployment. DeployType '%s'", deployType)
}
} else if deployType == "standard" {
deployCommand, deployOptions, smokeTestScript, err = prepareCfPushCfNativeDeploy(config)
if err != nil {
return errors.Wrapf(err, "Cannot prepare cf push native deployment. DeployType '%s'", deployType)
}
} else {
return fmt.Errorf("Invalid deploy type received: '%s'. Supported values: %v", deployType, []string{"blue-green", "standard"})
}
appName, err := getAppName(config)
if err != nil {
return err
}
manifestFile, err := getManifestFileName(config)
log.Entry().Infof("CF native deployment ('%s') with:", config.DeployType)
log.Entry().Infof("cfAppName='%s'", appName)
log.Entry().Infof("cfManifest='%s'", manifestFile)
log.Entry().Infof("cfManifestVariables: '%v'", config.ManifestVariables)
log.Entry().Infof("cfManifestVariablesFiles: '%v'", config.ManifestVariablesFiles)
log.Entry().Infof("cfdeployDockerImage: '%s'", config.DeployDockerImage)
log.Entry().Infof("smokeTestScript: '%s'", config.SmokeTestScript)
additionalEnvironment := []string{
"STATUS_CODE=" + strconv.FormatInt(int64(config.SmokeTestStatusCode), 10),
}
if len(config.DockerPassword) > 0 {
additionalEnvironment = append(additionalEnvironment, "CF_DOCKER_PASSWORD="+config.DockerPassword)
}
myDeployConfig := deployConfig{
DeployCommand: deployCommand,
DeployOptions: deployOptions,
AppName: config.AppName,
ManifestFile: config.Manifest,
SmokeTestScript: smokeTestScript,
}
log.Entry().Infof("DeployConfig: %v", myDeployConfig)
return deployCfNative(myDeployConfig, config, additionalEnvironment, command)
}
func deployCfNative(deployConfig deployConfig, config *cloudFoundryDeployOptions, additionalEnvironment []string, cmd command.ExecRunner) error {
deployStatement := []string{
deployConfig.DeployCommand,
}
if len(deployConfig.AppName) > 0 {
deployStatement = append(deployStatement, deployConfig.AppName)
}
if len(deployConfig.DeployOptions) > 0 {
deployStatement = append(deployStatement, deployConfig.DeployOptions...)
}
if len(deployConfig.ManifestFile) > 0 {
deployStatement = append(deployStatement, "-f")
deployStatement = append(deployStatement, deployConfig.ManifestFile)
}
if len(config.DeployDockerImage) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-image", config.DeployDockerImage)
}
if len(config.DockerUsername) > 0 && config.DeployType != "blue-green" {
deployStatement = append(deployStatement, "--docker-username", config.DockerUsername)
}
if len(deployConfig.SmokeTestScript) > 0 {
deployStatement = append(deployStatement, deployConfig.SmokeTestScript...)
}
if len(config.CfNativeDeployParameters) > 0 {
deployStatement = append(deployStatement, strings.Fields(config.CfNativeDeployParameters)...)
}
stopOldAppIfRunning := func(_cmd command.ExecRunner) error {
if config.KeepOldInstance && config.DeployType == "blue-green" {
oldAppName := deployConfig.AppName + "-old"
var buff bytes.Buffer
_cmd.Stdout(&buff)
defer func() {
_cmd.Stdout(log.Writer())
}()
err := _cmd.RunExecutable("cf", "stop", oldAppName)
if err != nil {
cfStopLog := buff.String()
if !strings.Contains(cfStopLog, oldAppName+" not found") {
return fmt.Errorf("Could not stop application '%s'. Error: %s", oldAppName, cfStopLog)
}
log.Entry().Infof("Cannot stop application '%s' since this appliation was not found.", oldAppName)
} else {
log.Entry().Infof("Old application '%s' has been stopped.", oldAppName)
}
}
return nil
}
return cfDeploy(config, deployStatement, additionalEnvironment, stopOldAppIfRunning, cmd)
}
func getManifest(name string) (cloudfoundry.Manifest, error) {
return cloudfoundry.ReadManifest(name)
}
func getManifestFileName(config *cloudFoundryDeployOptions) (string, error) |
func getAppName(config *cloudFoundryDeployOptions) (string, error) {
if len(config.AppName) > 0 {
return config.AppName, nil
}
if config.DeployType == "blue-green" {
return "", fmt.Errorf("Blue-green plugin requires app name to be passed (see https://github.com/bluemixgaragelondon/cf-blue-green-deploy/issues/27)")
}
manifestFile, err := getManifestFileName(config)
fileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
}
if !fileExists {
return "", fmt.Errorf("Manifest file '%s' not found. Cannot retrieve app name", manifestFile)
}
manifest, err := _getManifest(manifestFile)
if err != nil {
return "", err
}
apps, err := manifest.GetApplications()
if err != nil {
return "", err
}
if len(apps) == 0 {
return "", fmt.Errorf("No apps declared in manifest '%s'", manifestFile)
}
namePropertyExists, err := manifest.ApplicationHasProperty(0, "name")
if err != nil {
return "", err
}
if !namePropertyExists {
return "", fmt.Errorf("No appName available in manifest '%s'", manifestFile)
}
appName, err := manifest.GetApplicationProperty(0, "name")
if err != nil {
return "", err
}
var name string
var ok bool
if name, ok = appName.(string); !ok {
return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
}
if len(name) == 0 {
return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
}
return name, nil
}
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
if smokeTestScript == "blueGreenCheckScript.sh" {
// what should we do if there is already a script with the given name? Should we really overwrite ...
err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to write default smoke-test script: %w", err)
}
log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
}
if len(smokeTestScript) > 0 {
err := fileUtils.Chmod(smokeTestScript, 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
}
pwd, err := fileUtils.Getwd()
if err != nil {
return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
}
return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
return []string{}, nil
}
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string | {
manifestFileName := config.Manifest
if len(manifestFileName) == 0 {
manifestFileName = "manifest.yml"
}
return manifestFileName, nil
} | identifier_body |
cloudFoundryDeploy.go | log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
} else {
log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
}
return nil
}
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
deployOptions := []string{}
varOptions, err := _getVarsOptions(config.ManifestVariables)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
}
varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
if err != nil {
if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
for _, missingVarFile := range e.MissingFiles {
log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
}
} else {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
}
}
deployOptions = append(deployOptions, varOptions...)
deployOptions = append(deployOptions, varFileOptions...)
return "push", deployOptions, []string{}, nil
}
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
out := map[string]interface{}{}
if err == nil {
for _, key := range in.Keys() {
if k, ok := key.(string); ok {
val, exists := in.Get(key)
if exists {
out[k] = val
} else {
return nil, fmt.Errorf("No entry found for '%v'", key)
}
} else {
return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
}
}
}
return out, err
}
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", err
}
if config.DeployType == "blue-green" && manifestFileExists {
manifest, _ := _getManifest(manifestFile)
apps, err := manifest.GetApplications()
if err != nil {
return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
}
if len(apps) > 1 {
return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
}
hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
if err != nil {
return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
}
if len(apps) == 1 && hasNoRouteProperty {
const deployTypeStandard = "standard"
log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
return deployTypeStandard, nil
}
}
return config.DeployType, nil
}
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
deployCommand := "deploy"
deployParams := []string{}
if len(config.MtaDeployParameters) > 0 {
deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...)
}
if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
deployCommand = "bg-deploy"
const noConfirmFlag = "--no-confirm"
if !piperutils.ContainsString(deployParams, noConfirmFlag) {
deployParams = append(deployParams, noConfirmFlag)
}
}
cfDeployParams := []string{
deployCommand,
mtarFilePath,
}
if len(deployParams) > 0 {
cfDeployParams = append(cfDeployParams, deployParams...)
}
extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)
for _, extFile := range extFiles {
_, err := fileUtils.Copy(extFile, extFile+".original")
if err != nil {
return fmt.Errorf("Cannot prepare mta extension files: %w", err)
}
_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
if err != nil {
return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
}
}
cfDeployParams = append(cfDeployParams, extFileParams...)
err := cfDeploy(config, cfDeployParams, nil, nil, command)
for _, extFile := range extFiles {
renameError := fileUtils.FileRename(extFile+".original", extFile)
if err == nil && renameError != nil {
return renameError
}
}
return err
}
func handleMtaExtensionCredentials(extFile string, credentials map[string]interface{}) (updated, containsUnresolved bool, err error) {
log.Entry().Debugf("Inserting credentials into extension file '%s'", extFile)
b, err := fileUtils.FileRead(extFile)
if err != nil {
return false, false, errors.Wrapf(err, "Cannot handle credentials for mta extension file '%s'", extFile)
}
content := string(b)
env, err := toMap(_environ(), "=")
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
missingCredentials := []string{}
for name, credentialKey := range credentials {
credKey, ok := credentialKey.(string)
if !ok {
return false, false, fmt.Errorf("cannot handle mta extension credentials: Cannot cast '%v' (type %T) to string", credentialKey, credentialKey)
}
const allowedVariableNamePattern = "^[-_A-Za-z0-9]+$"
alphaNumOnly := regexp.MustCompile(allowedVariableNamePattern)
if !alphaNumOnly.MatchString(name) {
return false, false, fmt.Errorf("credential key name '%s' contains unsupported character. Must contain only %s", name, allowedVariableNamePattern)
}
pattern := regexp.MustCompile("<%=\\s*" + name + "\\s*%>")
if pattern.MatchString(content) {
cred := env[toEnvVarKey(credKey)]
if len(cred) == 0 {
missingCredentials = append(missingCredentials, credKey)
continue
}
content = pattern.ReplaceAllLiteralString(content, cred)
updated = true
log.Entry().Debugf("Mta extension credentials handling: Placeholder '%s' has been replaced by credential denoted by '%s'/'%s' in file '%s'", name, credKey, toEnvVarKey(credKey), extFile)
} else {
log.Entry().Debugf("Mta extension credentials handling: Variable '%s' is not used in file '%s'", name, extFile)
}
}
if len(missingCredentials) > 0 {
missinCredsEnvVarKeyCompatible := []string{}
for _, missingKey := range missingCredentials {
missinCredsEnvVarKeyCompatible = append(missinCredsEnvVarKeyCompatible, toEnvVarKey(missingKey))
}
// ensure stable order of the entries. Needed e.g. for the tests.
sort.Strings(missingCredentials)
sort.Strings(missinCredsEnvVarKeyCompatible)
return false, false, fmt.Errorf("cannot handle mta extension credentials: No credentials found for '%s'/'%s'. Are these credentials maintained?", missingCredentials, missinCredsEnvVarKeyCompatible)
}
if !updated {
log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has not been updated. Seems to contain no credentials.", extFile)
} else {
fInfo, err := fileUtils.Stat(extFile)
fMode := fInfo.Mode()
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
err = fileUtils.FileWrite(extFile, []byte(content), fMode)
if err != nil {
return false, false, errors.Wrap(err, "Cannot handle mta extension credentials.")
}
log.Entry().Debugf("Mta extension credentials handling: Extension file '%s' has been updated.", extFile)
}
re := regexp.MustCompile(`<%=.+%>`)
placeholders := re.FindAll([]byte(content), -1)
containsUnresolved = (len(placeholders) > 0)
if containsUnresolved {
log.Entry().Warningf("mta extension credential handling: Unresolved placeholders found after inserting credentials: %s", placeholders)
}
return updated, containsUnresolved, nil
}
func toEnvVarKey(key string) string {
key = regexp.MustCompile(`[^A-Za-z0-9]`).ReplaceAllString(key, "_")
return strings.ToUpper(regexp.MustCompile(`([a-z0-9])([A-Z])`).ReplaceAllString(key, "${1}_${2}"))
}
func | toMap | identifier_name | |
cloudFoundryDeploy.go | (smokeTestScript, 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
}
pwd, err := fileUtils.Getwd()
if err != nil {
return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
}
return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
return []string{}, nil
}
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
if err != nil {
return "", []string{}, []string{}, err
}
var deployOptions = []string{}
if !config.KeepOldInstance {
deployOptions = append(deployOptions, "--delete-old-apps")
}
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
}
if !manifestFileExists {
log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
} else {
manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
}
manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
}
modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
}
if modified {
log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
} else {
log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
}
err = handleLegacyCfManifest(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
}
}
return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles: in case the only provided file is 'manifest-variables.yml' and this file does not
// exist we ignore that file. For any other file there is no check if that file exists. In case several files are
// provided we also do not check for the default file 'manifest-variables.yml'
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
const defaultManifestVariableFileName = "manifest-variables.yml"
if len(manifestVariablesFiles) == 1 && manifestVariablesFiles[0] == defaultManifestVariableFileName {
// we have only the default file. Most likely this is not configured, but we simply have the default.
// In case this file does not exist we ignore that file.
exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
if err != nil {
return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
}
if !exists {
return []string{}, nil
}
}
return manifestVariablesFiles, nil
}
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
parameterMap := orderedmap.NewOrderedMap()
for _, p := range parameters {
keyVal := strings.Split(p, "=")
if len(keyVal) != 2 {
return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", p)
}
parameterMap.Set(keyVal[0], keyVal[1])
}
return parameterMap, nil
}
func handleLegacyCfManifest(manifestFile string) error {
manifest, err := _getManifest(manifestFile)
if err != nil {
return err
}
err = manifest.Transform()
if err != nil {
return err
}
if manifest.IsModified() {
err = manifest.WriteManifest()
if err != nil {
return err
}
log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
} else {
log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
}
return nil
}
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
deployOptions := []string{}
varOptions, err := _getVarsOptions(config.ManifestVariables)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
}
varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
if err != nil {
if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
for _, missingVarFile := range e.MissingFiles {
log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
}
} else {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
}
}
deployOptions = append(deployOptions, varOptions...)
deployOptions = append(deployOptions, varFileOptions...)
return "push", deployOptions, []string{}, nil
}
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
out := map[string]interface{}{}
if err == nil {
for _, key := range in.Keys() {
if k, ok := key.(string); ok {
val, exists := in.Get(key)
if exists {
out[k] = val
} else {
return nil, fmt.Errorf("No entry found for '%v'", key)
}
} else {
return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
}
}
}
return out, err
}
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", err
}
if config.DeployType == "blue-green" && manifestFileExists {
manifest, _ := _getManifest(manifestFile)
apps, err := manifest.GetApplications()
if err != nil {
return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
}
if len(apps) > 1 {
return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
}
hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
if err != nil {
return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
}
if len(apps) == 1 && hasNoRouteProperty {
const deployTypeStandard = "standard"
log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
return deployTypeStandard, nil
}
}
return config.DeployType, nil
}
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
deployCommand := "deploy"
deployParams := []string{}
if len(config.MtaDeployParameters) > 0 {
deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...)
}
if config.DeployType == "bg-deploy" || config.DeployType == "blue-green" {
deployCommand = "bg-deploy"
const noConfirmFlag = "--no-confirm"
if !piperutils.ContainsString(deployParams, noConfirmFlag) {
deployParams = append(deployParams, noConfirmFlag)
}
}
cfDeployParams := []string{
deployCommand,
mtarFilePath,
}
if len(deployParams) > 0 {
cfDeployParams = append(cfDeployParams, deployParams...)
}
extFileParams, extFiles := handleMtaExtensionDescriptors(config.MtaExtensionDescriptor)
for _, extFile := range extFiles {
_, err := fileUtils.Copy(extFile, extFile+".original")
if err != nil {
return fmt.Errorf("Cannot prepare mta extension files: %w", err)
}
_, _, err = handleMtaExtensionCredentials(extFile, config.MtaExtensionCredentials)
if err != nil | {
return fmt.Errorf("Cannot handle credentials inside mta extension files: %w", err)
} | conditional_block | |
cloudFoundryDeploy.go | 0, "name")
if err != nil {
return "", err
}
var name string
var ok bool
if name, ok = appName.(string); !ok {
return "", fmt.Errorf("appName from manifest '%s' has wrong type", manifestFile)
}
if len(name) == 0 {
return "", fmt.Errorf("appName from manifest '%s' is empty", manifestFile)
}
return name, nil
}
func handleSmokeTestScript(smokeTestScript string) ([]string, error) {
if smokeTestScript == "blueGreenCheckScript.sh" {
// what should we do if there is already a script with the given name? Should we really overwrite ...
err := fileUtils.FileWrite(smokeTestScript, []byte(defaultSmokeTestScript), 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to write default smoke-test script: %w", err)
}
log.Entry().Debugf("smoke test script '%s' has been written.", smokeTestScript)
}
if len(smokeTestScript) > 0 {
err := fileUtils.Chmod(smokeTestScript, 0755)
if err != nil {
return []string{}, fmt.Errorf("failed to make smoke-test script executable: %w", err)
}
pwd, err := fileUtils.Getwd()
if err != nil {
return []string{}, fmt.Errorf("failed to get current working directory for execution of smoke-test script: %w", err)
}
return []string{"--smoke-test", filepath.Join(pwd, smokeTestScript)}, nil
}
return []string{}, nil
}
func prepareBlueGreenCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
smokeTest, err := handleSmokeTestScript(config.SmokeTestScript)
if err != nil {
return "", []string{}, []string{}, err
}
var deployOptions = []string{}
if !config.KeepOldInstance {
deployOptions = append(deployOptions, "--delete-old-apps")
}
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", manifestFile)
}
if !manifestFileExists {
log.Entry().Infof("Manifest file '%s' does not exist", manifestFile)
} else {
manifestVariables, err := toStringInterfaceMap(toParameterMap(config.ManifestVariables))
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare manifest variables: '%v'", config.ManifestVariables)
}
manifestVariablesFiles, err := validateManifestVariablesFiles(config.ManifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot validate manifest variables files '%v'", config.ManifestVariablesFiles)
}
modified, err := _replaceVariables(manifestFile, manifestVariables, manifestVariablesFiles)
if err != nil {
return "", []string{}, []string{}, errors.Wrap(err, "Cannot prepare manifest file")
}
if modified {
log.Entry().Infof("Manifest file '%s' has been updated (variable substitution)", manifestFile)
} else {
log.Entry().Infof("Manifest file '%s' has not been updated (no variable substitution)", manifestFile)
}
err = handleLegacyCfManifest(manifestFile)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot handle legacy manifest '%s'", manifestFile)
}
}
return "blue-green-deploy", deployOptions, smokeTest, nil
}
// validateManifestVariablesFiles: in case the only provided file is 'manifest-variables.yml' and this file does not
// exist we ignore that file. For any other file there is no check if that file exists. In case several files are
// provided we also do not check for the default file 'manifest-variables.yml'
func validateManifestVariablesFiles(manifestVariablesFiles []string) ([]string, error) {
const defaultManifestVariableFileName = "manifest-variables.yml"
if len(manifestVariablesFiles) == 1 && manifestVariablesFiles[0] == defaultManifestVariableFileName {
// we have only the default file. Most likely this is not configured, but we simply have the default.
// In case this file does not exist we ignore that file.
exists, err := fileUtils.FileExists(defaultManifestVariableFileName)
if err != nil {
return []string{}, errors.Wrapf(err, "Cannot check if file '%s' exists", defaultManifestVariableFileName)
}
if !exists {
return []string{}, nil
}
}
return manifestVariablesFiles, nil
}
func toParameterMap(parameters []string) (*orderedmap.OrderedMap, error) {
parameterMap := orderedmap.NewOrderedMap()
for _, p := range parameters {
keyVal := strings.Split(p, "=")
if len(keyVal) != 2 {
return nil, fmt.Errorf("Invalid parameter provided (expected format <key>=<val>: '%s'", p)
}
parameterMap.Set(keyVal[0], keyVal[1])
}
return parameterMap, nil
}
func handleLegacyCfManifest(manifestFile string) error {
manifest, err := _getManifest(manifestFile)
if err != nil {
return err
}
err = manifest.Transform()
if err != nil {
return err
}
if manifest.IsModified() {
err = manifest.WriteManifest()
if err != nil {
return err
}
log.Entry().Infof("Manifest file '%s' was in legacy format has been transformed and updated.", manifestFile)
} else {
log.Entry().Debugf("Manifest file '%s' was not in legacy format. No transformation needed, no update performed.", manifestFile)
}
return nil
}
func prepareCfPushCfNativeDeploy(config *cloudFoundryDeployOptions) (string, []string, []string, error) {
deployOptions := []string{}
varOptions, err := _getVarsOptions(config.ManifestVariables)
if err != nil {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-options: '%v'", config.ManifestVariables)
}
varFileOptions, err := _getVarsFileOptions(config.ManifestVariablesFiles)
if err != nil {
if e, ok := err.(*cloudfoundry.VarsFilesNotFoundError); ok {
for _, missingVarFile := range e.MissingFiles {
log.Entry().Warningf("We skip adding not-existing file '%s' as a vars-file to the cf create-service-push call", missingVarFile)
}
} else {
return "", []string{}, []string{}, errors.Wrapf(err, "Cannot prepare var-file-options: '%v'", config.ManifestVariablesFiles)
}
}
deployOptions = append(deployOptions, varOptions...)
deployOptions = append(deployOptions, varFileOptions...)
return "push", deployOptions, []string{}, nil
}
func toStringInterfaceMap(in *orderedmap.OrderedMap, err error) (map[string]interface{}, error) {
out := map[string]interface{}{}
if err == nil {
for _, key := range in.Keys() {
if k, ok := key.(string); ok {
val, exists := in.Get(key)
if exists {
out[k] = val
} else {
return nil, fmt.Errorf("No entry found for '%v'", key)
}
} else {
return nil, fmt.Errorf("Cannot cast key '%v' to string", key)
}
}
}
return out, err
}
func checkAndUpdateDeployTypeForNotSupportedManifest(config *cloudFoundryDeployOptions) (string, error) {
manifestFile, err := getManifestFileName(config)
manifestFileExists, err := fileUtils.FileExists(manifestFile)
if err != nil {
return "", err
}
if config.DeployType == "blue-green" && manifestFileExists {
manifest, _ := _getManifest(manifestFile)
apps, err := manifest.GetApplications()
if err != nil {
return "", fmt.Errorf("failed to obtain applications from manifest: %w", err)
}
if len(apps) > 1 {
return "", fmt.Errorf("Your manifest contains more than one application. For blue green deployments your manifest file may contain only one application")
}
hasNoRouteProperty, err := manifest.ApplicationHasProperty(0, "no-route")
if err != nil {
return "", errors.Wrap(err, "Failed to obtain 'no-route' property from manifest")
}
if len(apps) == 1 && hasNoRouteProperty {
const deployTypeStandard = "standard"
log.Entry().Warningf("Blue green deployment is not possible for application without route. Using deployment type '%s' instead.", deployTypeStandard)
return deployTypeStandard, nil
}
}
return config.DeployType, nil
}
func deployMta(config *cloudFoundryDeployOptions, mtarFilePath string, command command.ExecRunner) error {
deployCommand := "deploy"
deployParams := []string{}
| if len(config.MtaDeployParameters) > 0 {
deployParams = append(deployParams, strings.Split(config.MtaDeployParameters, " ")...) | random_line_split | |
viewer.rs | .1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
{
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
},
WindowEvent::HiDpiFactorChanged(f) => {
*dpi_factor = f;
},
WindowEvent::DroppedFile(_path_buf) => {
() // TODO: drag file in
}
WindowEvent::MouseInput { button, state: Pressed, ..} => {
match button {
MouseButton::Left => {
orbit_controls.state = NavState::Rotating;
},
MouseButton::Right => {
orbit_controls.state = NavState::Panning;
}, | _ => ()
} | random_line_split | |
viewer.rs | {
let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn | (source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
| load | identifier_name |
viewer.rs | let gl_request = GlRequest::Specific(Api::OpenGl, (3, 3));
let gl_profile = GlProfile::Core;
let (events_loop, gl_window, dpi_factor, inner_size) =
if headless {
let headless_context = glutin::HeadlessRendererBuilder::new(width, height)
// .with_gl(gl_request)
// .with_gl_profile(gl_profile)
.build()
.unwrap();
unsafe { headless_context.make_current().unwrap() }
gl::load_with(|symbol| headless_context.get_proc_address(symbol) as *const _);
let framebuffer = Framebuffer::new(width, height);
framebuffer.bind();
unsafe { gl::Viewport(0, 0, width as i32, height as i32); }
(None, None, 1.0, PhysicalSize::new(width as f64, height as f64)) // TODO: real height (retina? (should be the same as PhysicalSize when headless?))
}
else {
// glutin: initialize and configure
let events_loop = glutin::EventsLoop::new();
let window_size = glutin::dpi::LogicalSize::new(width as f64, height as f64);
// TODO?: hints for 4.1, core profile, forward compat
let window = glutin::WindowBuilder::new()
.with_title("gltf-viewer")
.with_dimensions(window_size)
.with_visibility(visible);
let context = glutin::ContextBuilder::new()
.with_gl(gl_request)
.with_gl_profile(gl_profile)
.with_vsync(true);
let gl_window = glutin::GlWindow::new(window, context, &events_loop).unwrap();
// Real dimensions might be much higher on High-DPI displays
let dpi_factor = gl_window.get_hidpi_factor();
let inner_size = gl_window.get_inner_size().unwrap().to_physical(dpi_factor);
unsafe { gl_window.make_current().unwrap(); }
// gl: load all OpenGL function pointers
gl::load_with(|symbol| gl_window.get_proc_address(symbol) as *const _);
(Some(events_loop), Some(gl_window), dpi_factor, inner_size)
};
let mut orbit_controls = OrbitControls::new(
Point3::new(0.0, 0.0, 2.0),
inner_size);
orbit_controls.camera = Camera::default();
orbit_controls.camera.fovy = camera_options.fovy;
orbit_controls.camera.update_aspect_ratio(inner_size.width as f32 / inner_size.height as f32); // updates projection matrix
let first_mouse = true;
let last_x: f32 = inner_size.width as f32 / 2.0;
let last_y: f32 = inner_size.height as f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else | ;
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
| {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
} | conditional_block |
viewer.rs | f32 / 2.0;
unsafe {
print_context_info();
gl::ClearColor(0.0, 1.0, 0.0, 1.0); // green for debugging
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
if headless || !visible {
// transparent background for screenshots
gl::ClearColor(0.0, 0.0, 0.0, 0.0);
}
else {
gl::ClearColor(0.1, 0.2, 0.3, 1.0);
}
gl::Enable(gl::DEPTH_TEST);
// TODO: keyboard switch?
// draw in wireframe
// gl::PolygonMode(gl::FRONT_AND_BACK, gl::LINE);
};
let (root, scene) = Self::load(source, scene_index);
let mut viewer = GltfViewer {
size: inner_size,
dpi_factor,
orbit_controls,
first_mouse, last_x, last_y,
events_loop,
gl_window,
root,
scene,
delta_time: 0.0, // seconds
last_frame: Instant::now(),
render_timer: FrameTimer::new("rendering", 300),
};
unsafe { gl_check_error!(); };
if camera_options.index != 0 && camera_options.index >= viewer.root.camera_nodes.len() as i32 {
error!("No camera with index {} found in glTF file (max: {})",
camera_options.index, viewer.root.camera_nodes.len() as i32 - 1);
process::exit(2)
}
if !viewer.root.camera_nodes.is_empty() && camera_options.index != -1 {
let cam_node = &viewer.root.get_camera_node(camera_options.index as usize);
let cam_node_info = format!("{} ({:?})", cam_node.index, cam_node.name);
let cam = cam_node.camera.as_ref().unwrap();
info!("Using camera {} on node {}", cam.description(), cam_node_info);
viewer.orbit_controls.set_camera(cam, &cam_node.final_transform);
if camera_options.position.is_some() || camera_options.target.is_some() {
warn!("Ignoring --cam-pos / --cam-target since --cam-index is given.")
}
} else {
info!("Determining camera view from bounding box");
viewer.set_camera_from_bounds(camera_options.straight);
if let Some(p) = camera_options.position {
viewer.orbit_controls.position = Point3::from_vec(p)
}
if let Some(target) = camera_options.target {
viewer.orbit_controls.target = Point3::from_vec(target)
}
}
viewer
}
pub fn load(source: &str, scene_index: usize) -> (Root, Scene) {
let mut start_time = Instant::now();
// TODO!: http source
// let gltf =
if source.starts_with("http") {
panic!("not implemented: HTTP support temporarily removed.")
// let http_source = HttpSource::new(source);
// let import = gltf::Import::custom(http_source, Default::default());
// let gltf = import_gltf(import);
// println!(); // to end the "progress dots"
// gltf
}
// else {
let (doc, buffers, images) = match gltf::import(source) {
Ok(tuple) => tuple,
Err(err) => {
error!("glTF import failed: {:?}", err);
if let gltf::Error::Io(_) = err {
error!("Hint: Are the .bin file(s) referenced by the .gltf file available?")
}
process::exit(1)
},
};
let imp = ImportData { doc, buffers, images };
print_elapsed("Imported glTF in ", &start_time);
start_time = Instant::now();
// load first scene
if scene_index >= imp.doc.scenes().len() {
error!("Scene index too high - file has only {} scene(s)", imp.doc.scenes().len());
process::exit(3)
}
let base_path = Path::new(source);
let mut root = Root::from_gltf(&imp, base_path);
let scene = Scene::from_gltf(&imp.doc.scenes().nth(scene_index).unwrap(), &mut root);
print_elapsed(&format!("Loaded scene with {} nodes, {} meshes in ",
imp.doc.nodes().count(), imp.doc.meshes().len()), &start_time);
(root, scene)
}
/// determine "nice" camera perspective from bounding box. Inspired by donmccurdy/three-gltf-viewer
fn set_camera_from_bounds(&mut self, straight: bool) {
let bounds = &self.scene.bounds;
let size = (bounds.max - bounds.min).magnitude();
let center = bounds.center();
// TODO: x,y addition optional
let cam_pos = if straight {
Point3::new(
center.x,
center.y,
center.z + size * 0.75,
)
} else {
Point3::new(
center.x + size / 2.0,
center.y + size / 5.0,
center.z + size / 2.0,
)
};
self.orbit_controls.position = cam_pos;
self.orbit_controls.target = center;
self.orbit_controls.camera.znear = size / 100.0;
self.orbit_controls.camera.zfar = Some(size * 20.0);
self.orbit_controls.camera.update_projection_matrix();
}
pub fn start_render_loop(&mut self) {
loop {
// per-frame time logic
// NOTE: Deliberately ignoring the seconds of `elapsed()`
self.delta_time = f64::from(self.last_frame.elapsed().subsec_nanos()) / 1_000_000_000.0;
self.last_frame = Instant::now();
// events
let keep_running = process_events(
&mut self.events_loop.as_mut().unwrap(), self.gl_window.as_mut().unwrap(),
&mut self.orbit_controls,
&mut self.dpi_factor,
&mut self.size);
if !keep_running {
unsafe { gl_check_error!(); } // final error check so errors don't go unnoticed
break
}
self.orbit_controls.frame_update(self.delta_time); // keyboard navigation
self.draw();
self.gl_window.as_ref().unwrap().swap_buffers().unwrap();
}
}
// Returns whether to keep running
pub fn draw(&mut self) {
// render
unsafe {
self.render_timer.start();
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
let cam_params = self.orbit_controls.camera_params();
self.scene.draw(&mut self.root, &cam_params);
self.render_timer.end();
}
}
pub fn screenshot(&mut self, filename: &str) {
self.draw();
let mut img = DynamicImage::new_rgba8(self.size.width as u32, self.size.height as u32);
unsafe {
let pixels = img.as_mut_rgba8().unwrap();
gl::PixelStorei(gl::PACK_ALIGNMENT, 1);
gl::ReadPixels(0, 0, self.size.width as i32, self.size.height as i32, gl::RGBA,
gl::UNSIGNED_BYTE, pixels.as_mut_ptr() as *mut c_void);
gl_check_error!();
}
let img = img.flipv();
if let Err(err) = img.save(filename) {
error!("{}", err);
}
else {
println!("Saved {}x{} screenshot to {}", self.size.width, self.size.height, filename);
}
}
pub fn multiscreenshot(&mut self, filename: &str, count: u32) {
let min_angle : f32 = 0.0 ;
let max_angle : f32 = 2.0 * PI ;
let increment_angle : f32 = ((max_angle - min_angle)/(count as f32)) as f32;
for i in 1..(count+1) {
self.orbit_controls.rotate_object(increment_angle);
let dot = filename.rfind('.').unwrap_or_else(|| filename.len());
let mut actual_name = filename.to_string();
actual_name.insert_str(dot, &format!("_{}", i));
self.screenshot(&actual_name[..]);
}
}
}
#[allow(too_many_arguments)]
fn process_events(
events_loop: &mut glutin::EventsLoop,
gl_window: &glutin::GlWindow,
mut orbit_controls: &mut OrbitControls,
dpi_factor: &mut f64,
size: &mut PhysicalSize) -> bool
| {
let mut keep_running = true;
#[allow(single_match)]
events_loop.poll_events(|event| {
match event {
glutin::Event::WindowEvent{ event, .. } => match event {
WindowEvent::CloseRequested => {
keep_running = false;
},
WindowEvent::Destroyed => {
// Log and exit?
panic!("WindowEvent::Destroyed, unimplemented.");
},
WindowEvent::Resized(logical) => {
let ph = logical.to_physical(*dpi_factor);
gl_window.resize(ph);
*size = ph;
orbit_controls.camera.update_aspect_ratio((ph.width / ph.height) as f32);
orbit_controls.screen_size = ph;
}, | identifier_body | |
cq_reactor.rs | use super::{CmdId, CqId, InterruptSources, Nvme, NvmeComp, NvmeCmd, SqId};
/// A notification request, sent by the future in order to tell the completion thread that the
/// current task wants a notification when a matching completion queue entry has been seen.
#[derive(Debug)]
pub enum NotifReq {
RequestCompletion {
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
waker: task::Waker,
// TODO: Get rid of this allocation, or maybe a thread-local vec for reusing.
// TODO: Maybe the `remem` crate.
message: Arc<Mutex<Option<CompletionMessage>>>,
},
RequestAvailSubmission {
sq_id: SqId,
waker: task::Waker,
}
}
enum PendingReq {
PendingCompletion {
waker: task::Waker,
message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<N | random_line_split | ||
cq_reactor.rs | message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
}
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn | (
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFutureState | start_cq_reactor_thread | identifier_name |
cq_reactor.rs | message: Arc<Mutex<Option<CompletionMessage>>>,
cq_id: CqId,
sq_id: SqId,
cmd_id: CmdId,
},
PendingAvailSubmission {
waker: task::Waker,
sq_id: SqId,
},
}
struct CqReactor {
int_sources: InterruptSources,
nvme: Arc<Nvme>,
pending_reqs: Vec<PendingReq>,
// used to store commands that may be completed before a completion is requested
receiver: Receiver<NotifReq>,
event_queue: File,
}
impl CqReactor {
fn create_event_queue(int_sources: &mut InterruptSources) -> Result<File> |
fn new(
nvme: Arc<Nvme>,
mut int_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> Result<Self> {
Ok(Self {
event_queue: Self::create_event_queue(&mut int_sources)?,
int_sources,
nvme,
pending_reqs: Vec::new(),
receiver,
})
}
fn handle_notif_reqs_raw(pending_reqs: &mut Vec<PendingReq>, receiver: &Receiver<NotifReq>, block_until_first: bool) {
let mut blocking_iter;
let mut nonblocking_iter;
let iter: &mut dyn Iterator<Item = NotifReq> = if block_until_first {
blocking_iter = std::iter::once(receiver.recv().unwrap()).chain(receiver.try_iter());
&mut blocking_iter
} else {
nonblocking_iter = receiver.try_iter();
&mut nonblocking_iter
};
for req in iter {
log::trace!("Got notif req: {:?}", req);
match req {
NotifReq::RequestCompletion {
sq_id,
cq_id,
cmd_id,
waker,
message,
} => pending_reqs.push(PendingReq::PendingCompletion {
sq_id,
cq_id,
cmd_id,
message,
waker,
}),
NotifReq::RequestAvailSubmission { sq_id, waker } => pending_reqs.push(PendingReq::PendingAvailSubmission { sq_id, waker, }),
}
}
}
fn poll_completion_queues(&mut self, iv: u16) -> Option<()> {
let ivs_read_guard = self.nvme.cqs_for_ivs.read().unwrap();
let cqs_read_guard = self.nvme.completion_queues.read().unwrap();
let mut entry_count = 0;
let cq_ids = ivs_read_guard.get(&iv)?;
for cq_id in cq_ids.iter().copied() {
let mut completion_queue_guard = cqs_read_guard.get(&cq_id)?.lock().unwrap();
let &mut (ref mut completion_queue, _) = &mut *completion_queue_guard;
while let Some((head, entry)) = completion_queue.complete(None) {
unsafe { self.nvme.completion_queue_head(cq_id, head) };
log::trace!("Got completion queue entry (CQID {}): {:?} at {}", cq_id, entry, head);
{
let submission_queues_read_lock = self.nvme.submission_queues.read().unwrap();
// this lock is actually important, since it will block during submission from other
// threads. the lock won't be held for long by the submitters, but it still prevents
// the entry being lost before this reactor is actually able to respond:
let &(ref sq_lock, corresponding_cq_id) = submission_queues_read_lock.get(&{entry.sq_id}).expect("nvmed: internal error: queue returned from controller doesn't exist");
assert_eq!(cq_id, corresponding_cq_id);
let mut sq_guard = sq_lock.lock().unwrap();
sq_guard.head = entry.sq_head;
// the channel still has to be polled twice though:
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, false);
}
Self::try_notify_futures(&mut self.pending_reqs, cq_id, &entry);
entry_count += 1;
}
}
if entry_count == 0 {}
Some(())
}
fn finish_pending_completion(pending_reqs: &mut Vec<PendingReq>, req_cq_id: CqId, cq_id: CqId, sq_id: SqId, cmd_id: CmdId, entry: &NvmeComp, i: usize) -> bool {
if req_cq_id == cq_id
&& sq_id == entry.sq_id
&& cmd_id == entry.cid
{
let (waker, message) = match pending_reqs.remove(i) {
PendingReq::PendingCompletion { waker, message, .. } => (waker, message),
_ => unreachable!(),
};
*message.lock().unwrap() = Some(CompletionMessage { cq_entry: *entry });
waker.wake();
true
} else {
false
}
}
fn finish_pending_avail_submission(pending_reqs: &mut Vec<PendingReq>, sq_id: SqId, entry: &NvmeComp, i: usize) -> bool {
if sq_id == entry.sq_id {
let waker = match pending_reqs.remove(i) {
PendingReq::PendingAvailSubmission { waker, .. } => waker,
_ => unreachable!(),
};
waker.wake();
true
} else {
false
}
}
fn try_notify_futures(pending_reqs: &mut Vec<PendingReq>, cq_id: CqId, entry: &NvmeComp) -> Option<()> {
let mut i = 0usize;
let mut futures_notified = 0;
while i < pending_reqs.len() {
match &pending_reqs[i] {
&PendingReq::PendingCompletion { cq_id: req_cq_id, sq_id, cmd_id, .. } => if Self::finish_pending_completion(pending_reqs, req_cq_id, cq_id, sq_id, cmd_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
&PendingReq::PendingAvailSubmission { sq_id, .. } => if Self::finish_pending_avail_submission(pending_reqs, sq_id, entry, i) {
futures_notified += 1;
} else {
i += 1;
}
}
}
if futures_notified == 0 {}
Some(())
}
fn run(mut self) {
log::debug!("Running CQ reactor");
let mut event = Event::default();
let mut irq_word = [0u8; 8]; // stores the IRQ count
const WORD_SIZE: usize = mem::size_of::<usize>();
loop {
let block_until_first = self.pending_reqs.is_empty();
Self::handle_notif_reqs_raw(&mut self.pending_reqs, &self.receiver, block_until_first);
log::trace!("Handled notif reqs");
// block on getting the next event
if self.event_queue.read(&mut event).unwrap() == 0 {
// event queue has been destroyed
break;
}
let (vector, irq_handle) = match self.int_sources.iter_mut().nth(event.data) {
Some(s) => s,
None => continue,
};
if irq_handle.read(&mut irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
// acknowledge the interrupt (only necessary for level-triggered INTx# interrups)
if irq_handle.write(&irq_word[..WORD_SIZE]).unwrap() == 0 {
continue;
}
log::trace!("NVME IRQ: vector {}", vector);
self.nvme.set_vector_masked(vector, true);
self.poll_completion_queues(vector);
self.nvme.set_vector_masked(vector, false);
}
}
}
pub fn start_cq_reactor_thread(
nvme: Arc<Nvme>,
interrupt_sources: InterruptSources,
receiver: Receiver<NotifReq>,
) -> thread::JoinHandle<()> {
// Actually, nothing prevents us from spawning additional threads. the channel is MPMC and
// everything is properly synchronized. I'm not saying this is strictly required, but with
// multiple completion queues it might actually be worth considering. The (in-kernel) IRQ
// subsystem can have some room for improvement regarding lowering the latency, but MSI-X allows
// multiple vectors to point to different CPUs, so that the load can be balanced across the
// logical processors.
thread::spawn(move || {
CqReactor::new(nvme, interrupt_sources, receiver)
.expect("nvmed: failed to setup CQ reactor")
.run()
})
}
#[derive(Debug)]
pub struct CompletionMessage {
cq_entry: NvmeComp,
}
pub enum CompletionFuture | {
use syscall::flag::*;
let fd = syscall::open("event:", O_CLOEXEC | O_RDWR)?;
let mut file = unsafe { File::from_raw_fd(fd as RawFd) };
for (num, irq_handle) in int_sources.iter_mut() {
if file
.write(&Event {
id: irq_handle.as_raw_fd() as usize,
flags: syscall::EVENT_READ,
data: num as usize,
})
.unwrap()
== 0
{
panic!("Failed to setup event queue for {} {:?}", num, irq_handle);
}
}
Ok(file)
} | identifier_body |
xcbwin.rs |
}
// If no depth matches return root visual
return (scr.root_visual(), scr.root_depth());
}
pub struct XCB {
conn: Arc<Connection>,
scr_num: i32,
win: Window,
root: Window,
bufpix: Pixmap,
gc: Gcontext,
colour: Colormap,
visual: Visualid,
depth: u8,
size: (u16, u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x | {
for v in d.visuals() {
return (v.visual_id(), depth);
}
} | conditional_block | |
xcbwin.rs | u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
| impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
| self.pos = (x as i16, y as i16);
}
}
| random_line_split |
xcbwin.rs | u16), // (w, h)
pos: (i16, i16), // (x, y)
scr_size: (u16, u16),
bottom: bool,
click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>>,
}
impl XCB {
pub fn new() -> XCB {
// Create XCB struct to return
let (conn, scr_num) = {
let (conn, scr_num) = Connection::connect(None).unwrap();
(Arc::new(conn), scr_num)
};
let win = conn.generate_id();
let gc = conn.generate_id(); // The GC is created later
let colour = conn.generate_id();
let click_fn: Arc<Mutex<Box<Fn(i16, i16, u8) + Sync + Send>>> =
Arc::new(Mutex::new(Box::new(|_, _, _| {} // Placeholder closure
)));
let bufpix = conn.generate_id(); // Pixmap created later
let size = (1u16, 1u16); // default size
let root;
let visual;
let depth;
let mut scr_size = (0u16, 0u16);
{
let screen = conn.get_setup()
.roots()
.nth(scr_num as usize)
.unwrap();
scr_size.0 = screen.width_in_pixels();
scr_size.1 = screen.height_in_pixels();
root = screen.root();
let (v, d) = get_visualid_from_depth(screen, 32);
visual = v;
depth = d;
}
let x = XCB {
conn,
scr_num,
win,
root,
bufpix,
gc,
colour,
visual,
depth,
size,
pos: (0, 0),
scr_size,
bottom: false,
click_fn,
};
// Create the window
// Masks to use
create_colormap(&*x.conn, COLORMAP_ALLOC_NONE as u8,
x.colour, x.root,
x.visual)
.request_check().unwrap();
let values = [
(CW_EVENT_MASK, EVENT_MASK_BUTTON_PRESS | EVENT_MASK_EXPOSURE),
(CW_BACK_PIXEL, 0),
(CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn | (&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
| get_atom | identifier_name |
xcbwin.rs | (CW_COLORMAP, x.colour),
(CW_BORDER_PIXEL, 0),
];
create_window(&*x.conn,
x.depth,
x.win,
x.root,
x.pos.0, x.pos.1,
x.size.0, x.size.1,
0,
WINDOW_CLASS_INPUT_OUTPUT as u16,
x.visual,
&values)
.request_check().unwrap();
let title = "lemonade";
change_property(&*x.conn, xcb::PROP_MODE_REPLACE as u8, x.win,
xcb::ATOM_WM_NAME, xcb::ATOM_STRING, 8, title.as_bytes());
create_gc(&*x.conn, x.gc, x.win, &[]);
create_pixmap(&*x.conn, x.depth, x.bufpix,
x.win, x.size.0, x.size.1);
// Create event-monitoring thread
let conn = x.conn.clone();
let click_fn = x.click_fn.clone();
let win = x.win;
let bufpix = x.bufpix;
let gc = x.gc;
thread::spawn(move || {
while let Some(e) = conn.wait_for_event() {
match e.response_type() as u8 {
BUTTON_PRESS => {
let e: &ButtonPressEvent = unsafe {
cast_event(&e)
};
let (x, y) = (e.event_x(), e.event_y());
let b = e.detail();
let f = click_fn.lock().unwrap();
f(x, y, b);
}
EXPOSE => {
let e: &ExposeEvent = unsafe {
cast_event(&e)
};
let w = e.width();
let h = e.height();
let x = e.x() as i16;
let y = e.y() as i16;
copy_area(&*conn, bufpix, win, gc,
x, y, x, y, w, h);
conn.flush();
}
_ => {}
}
}
println!("ERROR");
});
return x;
}
fn map_window(&self) {
map_window(&self.conn, self.win);
}
fn unmap_window(&self) {
unmap_window(&self.conn, self.win);
}
fn reposition_window(&mut self) {
self.unmap_window();
let mut data: [i16; 12] = [
0, 0, 0, 0, // left, right, top, bottom
0, 0, // left offset
0, 0, // right offset
0, 0, // top offset
0, 0, // bottom offset
];
let curr_x = self.pos.0;
let (xb, xe) = (curr_x, curr_x + self.size.0 as i16);
let ypos;
if self.bottom {
ypos = self.scr_size.1 as i16 - self.size.1 as i16;
data[2] = 0; // top offset
data[3] = self.size.1 as i16;
data[8] = 0; data[9] = 0;
data[10] = xb; data[11] = xe;
} else {
ypos = 0;
data[2] = self.size.1 as i16;
data[3] = 0; // bottom offset
data[8] = xb; data[9] = xe;
data[10] = 0; data[11] = 0;
}
self.set_pos(curr_x as u16, ypos as u16);
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_STRUT_PARTIAL"),
ATOM_ATOM,
16,
&data);
self.map_window();
}
fn get_atom(&self, name: &str) -> Atom {
let atom = intern_atom(&self.conn, false, name);
atom.get_reply().unwrap().atom()
}
fn get_screen(&self) -> Screen {
let setup = self.conn.get_setup();
let screen = setup.roots().nth(self.scr_num as usize).unwrap();
return screen;
}
fn get_visual(&self) -> Visualtype {
for d in self.get_screen().allowed_depths() {
for v in d.visuals() {
if v.visual_id() == self.visual {
return v;
}
}
}
panic!("Failed to find visual type");
}
/// Set a new size for the window.
///
/// Note: This clears the buffer, so make sure to draw
/// after setting the size and not before. Else, the
/// drawn image is lost.
fn set_size(&mut self, w: u16, h: u16) {
// Update the pixmap to match new size
free_pixmap(&self.conn, self.bufpix);
create_pixmap(&self.conn, self.depth, self.bufpix,
self.win, w, h);
// Clear the new pixmap
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_CLEAR)]);
copy_area(&*self.conn, self.bufpix, self.bufpix, self.gc,
0, 0, 0, 0, w, h);
change_gc(&*self.conn, self.gc, &[(GC_FUNCTION, GX_COPY)]);
// Set the size
configure_window(&*self.conn, self.win, &[
(CONFIG_WINDOW_WIDTH as u16, w as u32),
(CONFIG_WINDOW_HEIGHT as u16, h as u32),
]).request_check()
.unwrap();
self.size = (w, h);
}
/// Set the internal position value.
///
/// Cannot move the window if it is docked. The `reposition_window` method
/// must be used if it is docked.
fn set_pos(&mut self, x: u16, y: u16) {
configure_window(&self.conn, self.win, &[
(CONFIG_WINDOW_X as u16, x as u32),
(CONFIG_WINDOW_Y as u16, y as u32),
]).request_check()
.unwrap();
self.pos = (x as i16, y as i16);
}
}
impl Dock for XCB {
fn create_surface(&self) -> cairo::Surface {
// Prepare cairo variables
let cr_conn = unsafe {
cairo::XCBConnection::from_raw_none(
self.conn.get_raw_conn() as *mut cairo_sys::xcb_connection_t)
};
let cr_draw = cairo::XCBDrawable(self.bufpix);
let cr_visual = unsafe {
cairo::XCBVisualType::from_raw_none(
&mut self.get_visual().base as *mut ffi::xcb_visualtype_t
as *mut cairo_sys::xcb_visualtype_t)
};
// Create the surface using previous variables
return cairo::Surface::create(
&cr_conn, &cr_draw, &cr_visual,
self.size.0 as i32, self.size.1 as i32);
}
fn dock(&self) {
let data = [
self.get_atom("_NET_WM_WINDOW_TYPE_DOCK"),
];
change_property(&self.conn,
PROP_MODE_REPLACE as u8,
self.win,
self.get_atom("_NET_WM_WINDOW_TYPE"),
xcb::ATOM_ATOM,
32,
&data)
.request_check()
.expect("Failed to dock window");
}
fn top(&mut self) {
self.bottom = false;
self.reposition_window();
}
fn bottom(&mut self) {
self.bottom = true;
self.reposition_window();
}
fn set_size(&mut self, w: u16, h: u16) {
self.set_size(w, h);
}
fn set_offset(&mut self, x: u16, y: u16) {
if self.bottom {
let screen_height = self.scr_size.1;
self.set_pos(x, screen_height - y);
} else {
self.set_pos(x, y);
}
self.reposition_window();
}
fn get_screen_size(&self) -> (u16, u16) {
(self.scr_size.0, self.scr_size.1)
}
fn flush(&self) {
copy_area(&*self.conn, self.bufpix, self.win, self.gc,
0, 0, 0, 0, self.size.0, self.size.1);
self.conn.flush();
}
fn click_cb<F>(&mut self, f: F)
where F: Fn(i16, i16, u8) + Send + Sync + 'static {
let mut cb = self.click_fn.lock().unwrap();
*cb = Box::new(f);
}
}
impl Drop for XCB {
fn drop(&mut self) | {
free_pixmap(&*self.conn, self.win);
free_pixmap(&*self.conn, self.bufpix);
free_gc(&*self.conn, self.gc);
free_colormap(&*self.conn, self.colour);
} | identifier_body | |
lib.rs | eg `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
julia_bindir: P,
image_path: Q,
) -> JlrsResult<Self> {
if INIT.swap(true, Ordering::SeqCst) {
Err(JlrsError::AlreadyInitialized)?;
}
let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
let image_path_str = image_path.as_ref().to_string_lossy().to_string();
if !julia_bindir.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
return Err(JlrsError::other(io_err))?;
}
if !image_path.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
return Err(JlrsError::other(io_err))?;
}
let bindir = CString::new(julia_bindir_str).unwrap();
let im_rel_path = CString::new(image_path_str).unwrap();
jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
self.scope(|global, _frame| unsafe {
let enable = if enable {
Value::true_v(global)
} else {
Value::false_v(global)
};
Module::main(global)
.submodule_ref("Jlrs")?
.wrapper_unchecked()
.global_ref("color")?
.value_unchecked()
.set_field_unchecked("x", enable)?;
Ok(())
})?;
Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
if path.as_ref().exists() {
return self.scope_with_slots(2, |global, frame| unsafe {
let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
let include_func = Module::main(global)
.function_ref("include")?
.wrapper_unchecked();
let res = include_func.call1(frame, path_jl_str)?;
return match res {
Ok(_) => Ok(()),
Err(e) => Err(JlrsError::IncludeError {
path: path.as_ref().to_string_lossy().into(),
msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
})?,
};
});
}
Err(JlrsError::IncludeNotFound {
path: path.as_ref().to_string_lossy().into(),
})?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
/// let _i = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results. The frame will preallocate `slots` slots.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope_with_slots(1, |_global, frame| {
/// // Uses the preallocated slot
/// let _i = Value::new(&mut *frame, 1u64)?;
/// // Allocates a new slot, because only a single slot was preallocated
/// let _j = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope_with_slots<T, F>(&mut self, slots: usize, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
if slots + 2 > self.page.size() {
self.page = StackPage::new(slots + 2);
}
let mut frame = GcFrame::new(self.page.as_mut(), slots, Sync);
func(global, &mut frame)
}
}
/// Provides access to global information.
pub fn info(&self) -> Info {
Info::new()
}
}
impl Drop for Julia {
fn drop(&mut self) {
unsafe {
jl_atexit_hook(0);
}
}
}
/// When you call Rust from Julia through `ccall`, Julia has already been initialized and trying to
/// initialize it again would cause a crash. In order to still be able to call Julia from Rust
/// and to borrow arrays (if you pass them as `Array` rather than `Ptr{Array}`), you'll need to
/// create a frame first. You can use this struct to do so. It must never be used outside
/// functions called through `ccall`, and only once for each `ccall`ed function.
///
/// If you only need to use a frame to borrow array data, you can use [`CCall::null_scope`].
/// Unlike [`Julia`], `CCall` postpones the allocation of the stack that is used for managing the
/// GC until a `GcFrame` is created. In the case of a null scope, this stack isn't allocated at
/// all.
pub struct CCall {
page: Option<StackPage>,
}
impl CCall {
/// Create a new `CCall`. This function must never be called outside a function called through
/// `ccall` from Julia and must only be called once during that call. The stack is not
/// allocated until a [`GcFrame`] is created.
pub unsafe fn new() -> Self {
CCall { page: None }
}
/// Wake the task associated with `handle`. The handle must be the `handle` field of a
/// `Base.AsyncCondition` in Julia. This can be used to call a long-running Rust function from
/// Julia with ccall in another thread and wait for it to complete in Julia without blocking,
/// there's an example available in the repository: ccall_with_threads.
///
/// This method is only available if the `uv` feature is enabled.
#[cfg(feature = "uv")]
pub unsafe fn uv_async_send(handle: *mut std::ffi::c_void) -> bool {
uv_async_send(handle.cast()) == 0
}
/// Creates a [`GcFrame`], calls the given closure, and returns its result.
pub fn | scope | identifier_name | |
lib.rs | ScopeExt
//! [`ScopeExt::scope`]: crate::memory::scope::ScopeExt::scope
//! [`Scope`]: crate::memory::scope::Scope
//! [`Scope::value_scope`]: crate::memory::scope::Scope::value_scope
//! [`Scope::result_scope`]: crate::memory::scope::Scope::result_scope
#![forbid(rustdoc::broken_intra_doc_links)]
pub mod convert;
pub mod error;
pub mod extensions;
pub mod info;
pub mod layout;
pub mod memory;
pub mod prelude;
pub(crate) mod private;
#[doc(hidden)]
pub mod util;
pub mod wrappers;
use convert::into_jlrs_result::IntoJlrsResult;
use error::{JlrsError, JlrsResult, CANNOT_DISPLAY_VALUE};
use info::Info;
#[cfg(feature = "uv")]
use jl_sys::uv_async_send;
use jl_sys::{
jl_array_dims_ptr, jl_array_ndims, jl_atexit_hook, jl_init, jl_init_with_image,
jl_is_initialized,
};
use memory::frame::{GcFrame, NullFrame};
use memory::global::Global;
use memory::mode::Sync;
use memory::stack_page::StackPage;
use prelude::Wrapper;
use private::Private;
use std::ffi::CString;
use std::io::{Error as IOError, ErrorKind};
use std::mem::{self, MaybeUninit};
use std::path::Path;
use std::ptr::null_mut;
use std::slice;
use std::sync::atomic::{AtomicBool, Ordering};
use wrappers::ptr::module::Module;
use wrappers::ptr::string::JuliaString;
use wrappers::ptr::value::Value;
use wrappers::ptr::{array::Array, call::Call, private::Wrapper as _};
pub(crate) static INIT: AtomicBool = AtomicBool::new(false);
pub(crate) static JLRS_JL: &'static str = include_str!("jlrs.jl");
/// A Julia instance. You must create it with [`Julia::init`] or [`Julia::init_with_image`]
/// before you can do anything related to Julia. While this struct exists Julia is active,
/// dropping it causes the shutdown code to be called but this doesn't leave Julia in a state from which it can be reinitialized.
pub struct Julia {
page: StackPage,
}
impl Julia {
/// Initialize Julia, this method can only be called once. If it's called a second time it
/// will return an error. If this struct is dropped, you will need to restart your program to
/// be able to call Julia code again.
///
/// This method is unsafe because it can race with another crate initializing Julia.
pub unsafe fn init() -> JlrsResult<Self> {
if jl_is_initialized() != 0 || INIT.swap(true, Ordering::SeqCst) {
return Err(JlrsError::AlreadyInitialized.into());
}
jl_init();
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// This method is similar to [`Julia::init`] except that it loads a custom system image. A
/// custom image can be generated with the [`PackageCompiler`] package for Julia. The main
/// advantage of using a custom image over the default one is that it allows you to avoid much
/// of the compilation overhead often associated with Julia.
///
/// Two arguments are required to call this method compared to [`Julia::init`];
/// `julia_bindir` and `image_relative_path`. The first must be the absolute path to a
/// directory that contains a compatible Julia binary (eg `${JULIA_DIR}/bin`), the second must
/// be either an absolute or a relative path to a system image.
///
/// This method will return an error if either of the two paths doesn't exist or if Julia
/// has already been initialized. It is unsafe because it can race with another crate
/// initializing Julia.
///
/// [`PackageCompiler`]: https://julialang.github.io/PackageCompiler.jl/dev/
pub unsafe fn init_with_image<P: AsRef<Path>, Q: AsRef<Path>>(
julia_bindir: P,
image_path: Q,
) -> JlrsResult<Self> {
if INIT.swap(true, Ordering::SeqCst) {
Err(JlrsError::AlreadyInitialized)?;
}
let julia_bindir_str = julia_bindir.as_ref().to_string_lossy().to_string();
let image_path_str = image_path.as_ref().to_string_lossy().to_string();
if !julia_bindir.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, julia_bindir_str);
return Err(JlrsError::other(io_err))?;
}
if !image_path.as_ref().exists() {
let io_err = IOError::new(ErrorKind::NotFound, image_path_str);
return Err(JlrsError::other(io_err))?;
}
let bindir = CString::new(julia_bindir_str).unwrap();
let im_rel_path = CString::new(image_path_str).unwrap();
jl_init_with_image(bindir.as_ptr(), im_rel_path.as_ptr());
let mut jl = Julia {
page: StackPage::default(),
};
jl.scope_with_slots(1, |_, frame| {
Value::eval_string(&mut *frame, JLRS_JL)?.into_jlrs_result()?;
Ok(())
})
.expect("Could not load Jlrs module");
Ok(jl)
}
/// Enable or disable colored error messages originating from Julia. If this is enabled the
/// error message in [`JlrsError::Exception`] can contain ANSI color codes. This feature is
/// disabled by default.
pub fn error_color(&mut self, enable: bool) -> JlrsResult<()> {
self.scope(|global, _frame| unsafe {
let enable = if enable {
Value::true_v(global)
} else {
Value::false_v(global)
};
Module::main(global)
.submodule_ref("Jlrs")?
.wrapper_unchecked()
.global_ref("color")?
.value_unchecked()
.set_field_unchecked("x", enable)?;
Ok(())
})?;
Ok(())
}
/// Calls `include` in the `Main` module in Julia, which executes the file's contents in that
/// module. This has the same effect as calling `include` in the Julia REPL.
///
/// Example:
///
/// ```no_run
/// # use jlrs::prelude::*;
/// # fn main() {
/// # let mut julia = unsafe { Julia::init().unwrap() };
/// julia.include("Path/To/MyJuliaCode.jl").unwrap();
/// # }
/// ```
pub fn include<P: AsRef<Path>>(&mut self, path: P) -> JlrsResult<()> {
if path.as_ref().exists() {
return self.scope_with_slots(2, |global, frame| unsafe {
let path_jl_str = JuliaString::new(&mut *frame, path.as_ref().to_string_lossy())?;
let include_func = Module::main(global)
.function_ref("include")?
.wrapper_unchecked();
let res = include_func.call1(frame, path_jl_str)?;
return match res {
Ok(_) => Ok(()),
Err(e) => Err(JlrsError::IncludeError {
path: path.as_ref().to_string_lossy().into(),
msg: e.display_string_or(CANNOT_DISPLAY_VALUE),
})?,
};
});
}
Err(JlrsError::IncludeNotFound {
path: path.as_ref().to_string_lossy().into(),
})?
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary
/// results.
///
/// Example:
///
/// ```
/// # use jlrs::prelude::*;
/// # use jlrs::util::JULIA;
/// # fn main() {
/// # JULIA.with(|j| {
/// # let mut julia = j.borrow_mut();
/// julia.scope(|_global, frame| {
/// let _i = Value::new(&mut *frame, 1u64)?;
/// Ok(())
/// }).unwrap();
/// # });
/// # }
/// ```
pub fn scope<T, F>(&mut self, func: F) -> JlrsResult<T>
where
for<'base> F: FnOnce(Global<'base>, &mut GcFrame<'base, Sync>) -> JlrsResult<T>,
{
unsafe {
let global = Global::new();
let mut frame = GcFrame::new(self.page.as_mut(), 0, Sync);
func(global, &mut frame)
}
}
/// This method is a main entrypoint to interact with Julia. It takes a closure with two
/// arguments, a `Global` and a mutable reference to a `GcFrame`, and can return arbitrary | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.