file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
utpgo.go | // Copyright (c) 2021 Storj Labs, Inc.
// Copyright (c) 2010 BitTorrent, Inc.
// See LICENSE for copying information.
package utp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/go-logr/logr"
"storj.io/utp-go/buffers"
"storj.io/utp-go/libutp"
)
// Buffer for data before it gets to µTP (there is another "send buffer" in
// the libutp code, but it is for managing flow control window sizes).
const (
readBufferSize = 200000
writeBufferSize = 200000
)
var noopLogger = logr.DiscardLogger{}
// Addr is a µTP endpoint address; it is structurally identical to a
// net.UDPAddr, since µTP runs over UDP.
type Addr net.UDPAddr

// Network returns the name of the network, "utp".
func (a *Addr) Network() string { return "utp" }

// String renders the address in the usual host:port form.
func (a *Addr) String() string { return (*net.UDPAddr)(a).String() }
// Conn is a single µTP connection implementing net.Conn. Its mutable
// fields are protected by the embedded utpSocket's stateLock.
type Conn struct {
    utpSocket
    logger   logr.Logger
    baseConn *libutp.Socket
    // set to true if the socket will close once the write buffer is empty
    willClose bool
    // set to true once the libutp-layer Close has been called
    libutpClosed bool
    // set to true when the socket has been closed by the remote side (or the
    // conn has experienced a timeout or other fatal error)
    remoteIsDone bool
    // set to true if a read call is pending
    readPending bool
    // set to true if a write call is pending
    writePending bool
    // closed when Close() is called
    closeChan chan struct{}
    // closed when baseConn has entered StateDestroying
    baseConnDestroyed chan struct{}
    // readBuffer tracks data that has been read on a particular Conn, but
    // not yet consumed by the application.
    readBuffer *buffers.SyncCircularBuffer
    // writeBuffer tracks data that needs to be sent on this Conn, which is
    // has not yet been collected by µTP.
    writeBuffer *buffers.SyncCircularBuffer
    readDeadline  time.Time
    writeDeadline time.Time
    // Set to true while waiting for a connection to complete (got
    // state=StateConnect). The connectChan channel will be closed once this
    // is set.
    connecting  bool
    connectChan chan struct{}
}
// Listener accepts incoming µTP connections on a local address,
// implementing net.Listener. New connections arrive on acceptChan, which
// is fed by the shared socketManager.
type Listener struct {
    utpSocket
    acceptChan <-chan *Conn
}
// utpSocket is shared functionality between Conn and Listener.
type utpSocket struct {
localAddr *net.UDPAddr
// manager is shared by all sockets using the same local address
// (for outgoing connections, only the one connection, but for incoming
// connections, this includes all connections received by the associated
// listening socket). It is reference-counted, and thus will only be
// cleaned up entirely when the last related socket is closed.
manager *socketManager
// changes to encounteredError, manager, or other state variables in Conn
// or Listener should all be protected with this lock. If it must be
// acquired at the same time as manager.baseConnLock, the
// manager.baseConnLock must be acquired first.
stateLock sync.Mutex
// Once set, all further Write/Read operations should fail with this error.
encounteredError error
}
// Dial attempts a µTP connection to address over the "utp", "utp4", or
// "utp6" network.
func Dial(network, address string) (net.Conn, error) {
    return DialOptions(network, address)
}

// DialContext is like Dial, but honors cancellation of ctx while the
// connection is being established.
func DialContext(ctx context.Context, network, address string) (net.Conn, error) {
    return DialOptions(network, address, WithContext(ctx))
}
// DialOptions connects to a µTP peer at address, applying any provided
// ConnectOption values (logger, context, TLS).
func DialOptions(network, address string, options ...ConnectOption) (net.Conn, error) {
    if network != "utp" && network != "utp4" && network != "utp6" {
        return nil, fmt.Errorf("network %s not supported", network)
    }
    remote, err := ResolveUTPAddr(network, address)
    if err != nil {
        return nil, err
    }
    return DialUTPOptions(network, nil, remote, options...)
}
// DialUTP connects to a µTP peer using explicit local and remote
// addresses.
func DialUTP(network string, localAddr, remoteAddr *Addr) (net.Conn, error) {
    return DialUTPOptions(network, localAddr, remoteAddr)
}
// DialUTPOptions connects localAddr to remoteAddr over µTP, applying the
// given options. If a TLS config was supplied via WithTLS, the returned
// connection is wrapped as a TLS client.
func DialUTPOptions(network string, localAddr, remoteAddr *Addr, options ...ConnectOption) (net.Conn, error) {
    state := utpDialState{
        logger: &noopLogger,
        ctx:    context.Background(),
    }
    for _, option := range options {
        option.apply(&state)
    }
    conn, err := dial(state.ctx, state.logger, network, localAddr, remoteAddr)
    if err != nil {
        return nil, err
    }
    if state.tlsConfig == nil {
        return conn, nil
    }
    return tls.Client(conn, state.tlsConfig), nil
}
// dial performs the real work of establishing an outgoing µTP connection:
// it creates a socketManager (opening a UDP socket), creates a libutp
// socket on it, starts the handshake, and waits for the handshake to
// complete or ctx to be canceled.
func dial(ctx context.Context, logger logr.Logger, network string, localAddr, remoteAddr *Addr) (*Conn, error) {
    managerLogger := logger.WithValues("remote-addr", remoteAddr)
    manager, err := newSocketManager(managerLogger, network, (*net.UDPAddr)(localAddr), (*net.UDPAddr)(remoteAddr))
    if err != nil {
        return nil, err
    }
    localUDPAddr := manager.LocalAddr().(*net.UDPAddr)
    // different from managerLogger in case local addr interface and/or port
    // has been clarified
    connLogger := logger.WithValues("local-addr", localUDPAddr, "remote-addr", remoteAddr, "dir", "out")
    utpConn := &Conn{
        utpSocket: utpSocket{
            localAddr: localUDPAddr,
            manager:   manager,
        },
        logger:            connLogger.WithName("utp-conn"),
        connecting:        true,
        connectChan:       make(chan struct{}),
        closeChan:         make(chan struct{}),
        baseConnDestroyed: make(chan struct{}),
        readBuffer:        buffers.NewSyncBuffer(readBufferSize),
        writeBuffer:       buffers.NewSyncBuffer(writeBufferSize),
    }
    connLogger.V(10).Info("creating outgoing socket")
    // thread-safe here, because no other goroutines could have a handle to
    // this mx yet.
    utpConn.baseConn, err = manager.mx.Create(packetSendCallback, manager, (*net.UDPAddr)(remoteAddr))
    if err != nil {
        return nil, err
    }
    utpConn.baseConn.SetCallbacks(&libutp.CallbackTable{
        OnRead:    onReadCallback,
        OnWrite:   onWriteCallback,
        GetRBSize: getRBSizeCallback,
        OnState:   onStateCallback,
        OnError:   onErrorCallback,
    }, utpConn)
    utpConn.baseConn.SetLogger(connLogger.WithName("utp-socket"))
    manager.start()
    func() {
        // now that the manager's goroutines have started, we do need
        // concurrency protection
        manager.baseConnLock.Lock()
        defer manager.baseConnLock.Unlock()
        connLogger.V(10).Info("initiating libutp-level Connect()")
        utpConn.baseConn.Connect()
    }()
    // wait for the handshake: connectChan is closed by the StateConnect
    // callback (onConnectOrWritable) or by setEncounteredError on failure
    select {
    case <-ctx.Done():
        _ = utpConn.Close()
        return nil, ctx.Err()
    case <-utpConn.connectChan:
    }
    // connection operation is complete, successful or not; record any error met
    utpConn.stateLock.Lock()
    err = utpConn.encounteredError
    utpConn.stateLock.Unlock()
    if err != nil {
        _ = utpConn.Close()
        return nil, utpConn.makeOpError("dial", err)
    }
    return utpConn, nil
}
// Listen creates a listening µTP socket on the given local address.
func Listen(network string, addr string) (net.Listener, error) {
    return ListenOptions(network, addr)
}
// ListenOptions creates a listening µTP socket with the given options. If
// a TLS config was supplied via WithTLS, the listener is wrapped to accept
// TLS connections.
func ListenOptions(network, addr string, options ...ConnectOption) (net.Listener, error) {
    state := utpDialState{logger: &noopLogger}
    for _, option := range options {
        option.apply(&state)
    }
    if network != "utp" && network != "utp4" && network != "utp6" {
        return nil, fmt.Errorf("network %s not supported", network)
    }
    udpAddr, err := ResolveUTPAddr(network, addr)
    if err != nil {
        return nil, err
    }
    utpListener, err := listen(state.logger, network, udpAddr)
    if err != nil {
        return nil, err
    }
    if state.tlsConfig == nil {
        return utpListener, nil
    }
    return tls.NewListener(utpListener, state.tlsConfig), nil
}
// ListenUTP creates a listening µTP socket on the given local address.
func ListenUTP(network string, localAddr *Addr) (*Listener, error) {
    return listen(&noopLogger, network, localAddr)
}

// ListenUTPOptions creates a listening µTP socket using the given local
// address and options.
func ListenUTPOptions(network string, localAddr *Addr, options ...ConnectOption) (*Listener, error) {
    state := utpDialState{logger: &noopLogger}
    for _, option := range options {
        option.apply(&state)
    }
    return listen(state.logger, network, localAddr)
}
// listen builds a Listener around a new socketManager bound to localAddr
// and starts the manager's goroutines.
func listen(logger logr.Logger, network string, localAddr *Addr) (*Listener, error) {
    manager, err := newSocketManager(logger, network, (*net.UDPAddr)(localAddr), nil)
    if err != nil {
        return nil, err
    }
    // the manager may have resolved a concrete interface and/or port
    boundAddr := manager.LocalAddr().(*net.UDPAddr)
    utpListener := &Listener{
        utpSocket: utpSocket{
            localAddr: boundAddr,
            manager:   manager,
        },
        acceptChan: manager.acceptChan,
    }
    manager.start()
    return utpListener, nil
}
// utpDialState accumulates the effects of ConnectOption values passed to
// the Dial*/Listen* entry points.
type utpDialState struct {
    logger    logr.Logger
    ctx       context.Context
    tlsConfig *tls.Config
}

// ConnectOption customizes how µTP connections are dialed or listened for.
type ConnectOption interface {
    apply(s *utpDialState)
}

// optionLogger carries a WithLogger setting.
type optionLogger struct {
    logger logr.Logger
}

func (o *optionLogger) apply(s *utpDialState) {
    s.logger = o.logger
}

// WithLogger directs connection internals to log through the given logger.
func WithLogger(logger logr.Logger) ConnectOption {
    return &optionLogger{logger: logger}
}

// optionContext carries a WithContext setting.
type optionContext struct {
    ctx context.Context
}

func (o *optionContext) apply(s *utpDialState) {
    s.ctx = o.ctx
}

// WithContext makes a dial operation honor cancellation of ctx. (The
// Listen* entry points do not consult the context.)
func WithContext(ctx context.Context) ConnectOption {
    return &optionContext{ctx: ctx}
}

// optionTLS carries a WithTLS setting.
type optionTLS struct {
    tlsConfig *tls.Config
}

func (o *optionTLS) apply(s *utpDialState) {
    s.tlsConfig = o.tlsConfig
}

// WithTLS wraps the resulting connection or listener with TLS using the
// given config.
func WithTLS(tlsConfig *tls.Config) ConnectOption {
    return &optionTLS{tlsConfig: tlsConfig}
}
// Close flushes buffered writes, closes the libutp-level socket, waits for
// it to reach StateDestroying, and releases this Conn's reference on the
// shared socketManager. Calling Close a second time returns an error.
func (c *Conn) Close() error {
    // indicate our desire to close; once buffers are flushed, we can continue
    c.stateLock.Lock()
    if c.willClose {
        c.stateLock.Unlock()
        return errors.New("multiple calls to Close() not allowed")
    }
    c.willClose = true
    c.stateLock.Unlock()
    // wait for write buffer to be flushed
    c.writeBuffer.FlushAndClose()
    // if there are still any blocked reads, shut them down
    c.readBuffer.Close()
    // close baseConn
    err := func() error {
        // yes, even libutp.(*UTPSocket).Close() needs concurrency protection;
        // it may end up invoking callbacks
        c.manager.baseConnLock.Lock()
        defer c.manager.baseConnLock.Unlock()
        c.logger.V(10).Info("closing baseConn")
        // record that the libutp-level Close has happened, so that
        // onErrorCallback knows to prod the socket again if an error
        // arrives mid-close
        c.libutpClosed = true
        return c.baseConn.Close()
    }()
    // wait for socket to enter StateDestroying (signaled via onStateCallback)
    <-c.baseConnDestroyed
    // make subsequent Read/Write calls fail with net.ErrClosed
    c.setEncounteredError(net.ErrClosed)
    socketCloseErr := c.utpSocket.Close()
    // even if err was already set, this one is likely to be more helpful/interesting.
    if socketCloseErr != nil {
        err = socketCloseErr
    }
    return err
}
// SetLogger replaces the logger used by the underlying libutp socket.
// NOTE(review): this updates only baseConn's logger; c.logger (used for
// Conn-level log lines) is left unchanged — confirm that is intended.
func (c *Conn) SetLogger(logger logr.Logger) {
    c.baseConn.SetLogger(logger)
}
// Read implements net.Conn by delegating to ReadContext with a background
// context.
func (c *Conn) Read(buf []byte) (n int, err error) {
    return c.ReadContext(context.Background(), buf)
}

// stateEnterRead checks whether a read may begin and, if allowed, marks
// one as pending. The caller must hold c.stateLock.
func (c *Conn) stateEnterRead() error {
    if c.readPending {
        return buffers.ReaderAlreadyWaitingErr
    }
    if c.willClose {
        return c.makeOpError("read", net.ErrClosed)
    }
    if c.remoteIsDone && c.readBuffer.SpaceUsed() == 0 {
        // remote side is gone and nothing is left to drain
        return c.makeOpError("read", c.encounteredError)
    }
    c.readPending = true
    return nil
}
// ReadContext reads available data into buf, blocking until at least one
// byte is available, ctx is canceled, the read deadline passes, or the
// connection is closed. Once the read buffer is closed and drained it
// returns io.EOF.
func (c *Conn) ReadContext(ctx context.Context, buf []byte) (n int, err error) {
    c.stateLock.Lock()
    // snapshot error and deadline while registering the pending read
    encounteredErr := c.encounteredError
    deadline := c.readDeadline
    err = c.stateEnterRead()
    c.stateLock.Unlock()
    if err != nil {
        return 0, err
    }
    // un-register the pending read on the way out
    defer func() {
        c.stateLock.Lock()
        defer c.stateLock.Unlock()
        c.readPending = false
    }()
    if !deadline.IsZero() {
        var cancel func()
        ctx, cancel = context.WithDeadline(ctx, deadline)
        defer cancel()
    }
    for {
        var ok bool
        n, ok = c.readBuffer.TryConsume(buf)
        if ok {
            // a successful zero-byte consume is how the buffer reports
            // end-of-stream after being closed for writes
            if n == 0 {
                return 0, io.EOF
            }
            return n, nil
        }
        if encounteredErr != nil {
            return 0, c.makeOpError("read", encounteredErr)
        }
        // block until at least one byte arrives
        waitChan, cancelWait, err := c.readBuffer.WaitForBytesChan(1)
        if err != nil {
            return 0, err
        }
        select {
        case <-ctx.Done():
            cancelWait()
            err = ctx.Err()
            if errors.Is(err, context.DeadlineExceeded) {
                // transform deadline error to os.ErrDeadlineExceeded as per
                // net.Conn specification
                err = c.makeOpError("read", os.ErrDeadlineExceeded)
            }
            return 0, err
        case <-c.closeChan:
            cancelWait()
            return 0, c.makeOpError("read", net.ErrClosed)
        case <-waitChan:
        }
    }
}
// Write implements net.Conn by delegating to WriteContext with a
// background context.
func (c *Conn) Write(buf []byte) (n int, err error) {
    return c.WriteContext(context.Background(), buf)
}
// WriteContext appends buf to the connection's write buffer, blocking
// until the whole buffer is accepted, ctx is canceled, the write deadline
// passes, or the connection fails or is closed. On success it returns
// len(buf).
func (c *Conn) WriteContext(ctx context.Context, buf []byte) (n int, err error) {
    c.stateLock.Lock()
    if c.writePending {
        c.stateLock.Unlock()
        return 0, buffers.WriterAlreadyWaitingErr
    }
    // BUG FIX: previously, err was tested below without ever being
    // assigned, so the error-translation branch was dead code. Capture any
    // connection-fatal error recorded so far and fail fast.
    err = c.encounteredError
    if err != nil {
        c.stateLock.Unlock()
        if err == io.EOF {
            // remote side closed connection cleanly, and µTP in/out streams
            // are not independently closeable. Doesn't make sense to return
            // an EOF from a Write method, so..
            err = c.makeOpError("write", syscall.ECONNRESET)
        } else if err == net.ErrClosed {
            err = c.makeOpError("write", net.ErrClosed)
        }
        return 0, err
    }
    c.writePending = true
    deadline := c.writeDeadline
    c.stateLock.Unlock()
    // un-register the pending write on the way out
    defer func() {
        c.stateLock.Lock()
        defer c.stateLock.Unlock()
        c.writePending = false
    }()
    if !deadline.IsZero() {
        var cancel func()
        ctx, cancel = context.WithDeadline(ctx, deadline)
        defer cancel()
    }
    for {
        // snapshot connection state under the lock for this iteration
        c.stateLock.Lock()
        willClose := c.willClose
        remoteIsDone := c.remoteIsDone
        encounteredError := c.encounteredError
        c.stateLock.Unlock()
        if willClose {
            return 0, c.makeOpError("write", net.ErrClosed)
        }
        if remoteIsDone {
            return 0, c.makeOpError("write", encounteredError)
        }
        if ok := c.writeBuffer.TryAppend(buf); ok {
            // make sure µTP knows about the new bytes. this might be a bit
            // confusing, but it doesn't matter if other writes occur between
            // the TryAppend() above and the acquisition of the baseConnLock
            // below. All that matters is that (a) there is at least one call
            // to baseConn.Write scheduled to be made after this point (without
            // undue blocking); (b) baseConnLock is held when that Write call
            // is made; and (c) the amount of data in the write buffer does not
            // decrease between the SpaceUsed() call and the start of the next
            // call to onWriteCallback.
            func() {
                c.manager.baseConnLock.Lock()
                defer c.manager.baseConnLock.Unlock()
                amount := c.writeBuffer.SpaceUsed()
                c.logger.V(10).Info("informing libutp layer of data for writing", "len", amount)
                c.baseConn.Write(amount)
            }()
            return len(buf), nil
        }
        waitChan, cancelWait, err := c.writeBuffer.WaitForSpaceChan(len(buf))
        if err != nil {
            if err == buffers.IsClosedErr {
                // use the error snapshot taken under stateLock above rather
                // than re-reading c.encounteredError unsynchronized
                err = c.makeOpError("write", encounteredError)
            }
            return 0, err
        }
        // couldn't write the data yet; wait until we can, or until we hit the
        // timeout, or until the conn is closed.
        select {
        case <-ctx.Done():
            cancelWait()
            err = ctx.Err()
            if errors.Is(err, context.DeadlineExceeded) {
                // transform deadline error to os.ErrDeadlineExceeded as per
                // net.Conn specification
                err = c.makeOpError("write", os.ErrDeadlineExceeded)
            }
            return 0, err
        case <-c.closeChan:
            cancelWait()
            return 0, c.makeOpError("write", net.ErrClosed)
        case <-waitChan:
        }
    }
}
// RemoteAddr returns the address of the connection's peer.
func (c *Conn) RemoteAddr() net.Addr {
    // GetPeerName is thread-safe
    peer := c.baseConn.GetPeerName()
    return (*Addr)(peer)
}
// SetReadDeadline sets the deadline applied to future Read calls.
func (c *Conn) SetReadDeadline(t time.Time) error {
    c.stateLock.Lock()
    c.readDeadline = t
    c.stateLock.Unlock()
    return nil
}

// SetWriteDeadline sets the deadline applied to future Write calls.
func (c *Conn) SetWriteDeadline(t time.Time) error {
    c.stateLock.Lock()
    c.writeDeadline = t
    c.stateLock.Unlock()
    return nil
}

// SetDeadline sets both the read and the write deadlines.
func (c *Conn) SetDeadline(t time.Time) error {
    c.stateLock.Lock()
    c.readDeadline = t
    c.writeDeadline = t
    c.stateLock.Unlock()
    return nil
}
// makeOpError builds a *net.OpError for this connection, with the local
// address as Source and the peer address as Addr.
func (c *Conn) makeOpError(op string, err error) error {
    opErr := c.utpSocket.makeOpError(op, err).(*net.OpError)
    // utpSocket.makeOpError put the local address in Addr; shift it to
    // Source and fill in the peer address
    opErr.Source = opErr.Addr
    opErr.Addr = c.RemoteAddr()
    return opErr
}
var _ net.Conn = &Conn{}
// AcceptUTPContext accepts the next incoming µTP connection, honoring
// cancellation of ctx. When the manager has shut down (acceptChan closed)
// it returns the recorded error, or a net.ErrClosed OpError if none.
func (l *Listener) AcceptUTPContext(ctx context.Context) (*Conn, error) {
    select {
    case newConn, ok := <-l.acceptChan:
        if ok {
            return newConn, nil
        }
        // FIX: encounteredError is documented as protected by stateLock;
        // the previous code read it unsynchronized here.
        l.stateLock.Lock()
        err := l.encounteredError
        l.stateLock.Unlock()
        if err == nil {
            err = l.makeOpError("accept", net.ErrClosed)
        }
        return nil, err
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}
// AcceptUTP accepts the next incoming µTP connection.
func (l *Listener) AcceptUTP() (*Conn, error) {
    return l.AcceptUTPContext(context.Background())
}

// Accept implements net.Listener.
func (l *Listener) Accept() (net.Conn, error) {
    return l.AcceptUTP()
}

// AcceptContext accepts the next connection, honoring ctx cancellation.
func (l *Listener) AcceptContext(ctx context.Context) (net.Conn, error) {
    return l.AcceptUTPContext(ctx)
}

// Close shuts down the listener, releasing its manager reference.
func (l *Listener) Close() error {
    return l.utpSocket.Close()
}

// Addr returns the local address the listener is bound to.
func (l *Listener) Addr() net.Addr {
    return l.utpSocket.LocalAddr()
}
var _ net.Listener = &Listener{}
// makeOpError builds a *net.OpError tagged with the "utp" network and this
// socket's local address.
func (u *utpSocket) makeOpError(op string, err error) error {
    opErr := net.OpError{
        Op:   op,
        Net:  "utp",
        Addr: u.LocalAddr(),
        Err:  err,
    }
    return &opErr
}
// Close drops this socket's reference on the shared manager (which closes
// the manager entirely if it was the last reference). It is a no-op when
// the manager has already been released.
func (u *utpSocket) Close() (err error) {
    u.stateLock.Lock()
    defer u.stateLock.Unlock()
    if u.manager == nil {
        return nil
    }
    err = u.manager.decrementReferences()
    u.manager = nil
    return err
}
// setEncounteredError records err as this conn's fatal error (keeping the
// first error if called repeatedly) and, if a dial is still in progress,
// unblocks it by closing connectChan.
func (c *Conn) setEncounteredError(err error) {
    if err == nil {
        return
    }
    c.stateLock.Lock()
    defer c.stateLock.Unlock()
    if c.encounteredError == nil {
        // keep the first error if this is called multiple times
        c.encounteredError = err
    }
    if !c.connecting {
        return
    }
    c.connecting = false
    close(c.connectChan)
}
// LocalAddr returns the local UDP address as a µTP *Addr.
func (u *utpSocket) LocalAddr() net.Addr {
    return (*Addr)(u.localAddr)
}
// socketManager owns a UDP socket and the libutp multiplexer attached to
// it, together with the goroutines that pump packets and timeouts. It is
// reference-counted and shared by every Conn/Listener using the same
// local address.
type socketManager struct {
    mx        *libutp.SocketMultiplexer
    logger    logr.Logger
    udpSocket *net.UDPConn
    // this lock should be held when invoking any libutp functions or methods
    // that are not thread-safe or which themselves might invoke callbacks
    // (that is, nearly all libutp functions or methods). It can be assumed
    // that this lock is held in callbacks.
    baseConnLock sync.Mutex
    refCountLock sync.Mutex
    refCount     int
    // cancelManagement is a cancel function that should be called to close
    // down the socket management goroutines. The main managing goroutine
    // should clean up and return any close error on closeErr.
    cancelManagement func()
    // closeErr is a channel on which the managing goroutine will return any
    // errors from a close operation when all is complete.
    closeErr chan error
    // to be allocated with a buffer the size of the intended backlog. There
    // can be at most one utpSocket able to receive on this channel (one
    // Listener for any given UDP socket).
    acceptChan chan *Conn
    // just a way to accumulate errors in sending or receiving on the UDP
    // socket; this may cause future Write/Read method calls to return the
    // error in the future
    socketErrors     []error
    socketErrorsLock sync.Mutex
    pollInterval     time.Duration
}

// defaultUTPConnBacklogSize is the accept-backlog capacity of acceptChan.
const (
    defaultUTPConnBacklogSize = 5
)
// newSocketManager validates the network name, opens the underlying UDP
// socket, and builds the socketManager that will shuttle packets between
// that socket and libutp. remoteAddr is only used to choose the error op
// ("dial" when set, "listen" when nil); the UDP socket is not connected.
func newSocketManager(logger logr.Logger, network string, localAddr, remoteAddr *net.UDPAddr) (*socketManager, error) {
    switch network {
    case "utp", "utp4", "utp6":
    default:
        op := "dial"
        if remoteAddr == nil {
            op = "listen"
        }
        return nil, &net.OpError{Op: op, Net: network, Source: localAddr, Addr: remoteAddr, Err: net.UnknownNetworkError(network)}
    }
    // map "utp*" to the corresponding "udp*" network name
    udpNetwork := "udp" + network[3:]
    // thread-safe here; don't need baseConnLock
    mx := libutp.NewSocketMultiplexer(logger.WithName("mx").WithValues("local-addr", localAddr.String()), nil)
    udpSocket, err := net.ListenUDP(udpNetwork, localAddr)
    if err != nil {
        return nil, err
    }
    sm := &socketManager{
        mx:        mx,
        logger:    logger.WithName("manager").WithValues("local-addr", udpSocket.LocalAddr()),
        udpSocket: udpSocket,
        // the caller holds the first reference
        refCount:     1,
        closeErr:     make(chan error),
        acceptChan:   make(chan *Conn, defaultUTPConnBacklogSize),
        pollInterval: 5 * time.Millisecond,
    }
    return sm, nil
}
// start launches the manager's two background goroutines — one driving
// libutp timeout processing and shutdown, one receiving UDP datagrams —
// labeled for pprof. They run until cancelManagement is called.
func (sm *socketManager) start() {
    ctx, cancel := context.WithCancel(context.Background())
    sm.cancelManagement = cancel
    localAddr := sm.udpSocket.LocalAddr().String()
    go func() {
        labels := pprof.Labels("name", "socket-management", "udp-socket", localAddr)
        pprof.Do(ctx, labels, sm.socketManagement)
    }()
    go func() {
        labels := pprof.Labels("name", "udp-receiver", "udp-socket", localAddr)
        pprof.Do(ctx, labels, sm.udpMessageReceiver)
    }()
}
// LocalAddr reports the address the UDP socket is actually bound to.
func (sm *socketManager) LocalAddr() net.Addr {
    return sm.udpSocket.LocalAddr()
}
// socketManagement periodically drives libutp timeout processing (every
// pollInterval) until the management context is canceled, then tears the
// manager down.
func (sm *socketManager) socketManagement(ctx context.Context) {
    timer := time.NewTimer(sm.pollInterval)
    defer timer.Stop()
    for {
        timer.Reset(sm.pollInterval)
        select {
        case <-ctx.Done():
            // at this point, all attached Conn instances should be
            // closed already
            sm.internalClose()
            return
        case <-timer.C:
        }
        sm.checkTimeouts()
    }
}
func (sm *socketManager) proc | a []byte, destAddr *net.UDPAddr) {
sm.baseConnLock.Lock()
defer sm.baseConnLock.Unlock()
sm.mx.IsIncomingUTP(gotIncomingConnectionCallback, packetSendCallback, sm, data, destAddr)
}
// checkTimeouts runs libutp's periodic timeout processing under the
// baseConnLock (CheckTimeouts may invoke callbacks).
func (m *socketManager) checkTimeouts() {
    m.baseConnLock.Lock()
    defer m.baseConnLock.Unlock()
    m.mx.CheckTimeouts()
}
// internalClose tears down the UDP socket and reports the close result.
// It runs on the management goroutine after cancelManagement fires (i.e.,
// once the last reference has been dropped).
func (sm *socketManager) internalClose() {
    err := sm.udpSocket.Close()
    sm.mx = nil
    // decrementReferences blocks receiving on closeErr; hand it the result
    sm.closeErr <- err
    close(sm.closeErr)
    // wake any pending Accept so it can observe the shutdown
    close(sm.acceptChan)
}
// incrementReferences records one more utpSocket sharing this manager.
func (m *socketManager) incrementReferences() {
    m.refCountLock.Lock()
    defer m.refCountLock.Unlock()
    m.refCount++
}
// decrementReferences drops one reference to the manager. When the count
// reaches zero, it cancels the management goroutines and blocks until
// internalClose reports the close result.
func (sm *socketManager) decrementReferences() error {
    sm.refCountLock.Lock()
    defer sm.refCountLock.Unlock()
    sm.refCount--
    if sm.refCount == 0 {
        sm.logger.V(1).Info("closing socketManager")
        sm.cancelManagement()
        // internalClose (on the management goroutine) sends the result here
        return <-sm.closeErr
    }
    if sm.refCount < 0 {
        return errors.New("socketManager closed too many times")
    }
    return nil
}
// udpMessageReceiver reads datagrams from the UDP socket and feeds each
// complete packet into libutp, until the socket is closed.
func (sm *socketManager) udpMessageReceiver(ctx context.Context) {
    // thread-safe; don't need baseConnLock for GetUDPMTU
    bufSize := libutp.GetUDPMTU(sm.LocalAddr().(*net.UDPAddr))
    // It turns out GetUDPMTU is frequently wrong, and when it gives us a lower
    // number than the real MTU, and the other side is sending bigger packets,
    // then we end up not being able to read the full packets. Start with a
    // receive buffer twice as big as we thought we might need, and increase it
    // further from there if needed.
    bufSize *= 2
    sm.logger.V(0).Info("udp message receiver started", "receive-buf-size", bufSize, "local-addr", sm.LocalAddr())
    b := make([]byte, bufSize)
    for {
        n, _, flags, addr, err := sm.udpSocket.ReadMsgUDP(b, nil)
        if err != nil {
            if ctx.Err() != nil {
                // we expect an error here; the socket has been closed; it's fine
                return
            }
            // non-shutdown error: record it and keep receiving
            sm.registerSocketError(err)
            continue
        }
        if flags&syscall.MSG_TRUNC != 0 {
            // we didn't get the whole packet. don't pass it on to µTP; it
            // won't recognize the truncation and will pretend like that's
            // all the data there is. let the packet loss detection stuff
            // do its part instead.
            continue
        }
        sm.logger.V(10).Info("udp received bytes", "len", n, "remote-addr", addr)
        sm.processIncomingPacket(b[:n], addr)
    }
}
// registerSocketError logs and accumulates a UDP send/receive error.
func (m *socketManager) registerSocketError(err error) {
    m.socketErrorsLock.Lock()
    defer m.socketErrorsLock.Unlock()
    m.logger.Error(err, "socket error")
    m.socketErrors = append(m.socketErrors, err)
}
// gotIncomingConnectionCallback is invoked by libutp when a remote peer
// initiates a new connection. The baseConnLock is held when this is
// called. The new Conn is handed to the Listener via acceptChan, or
// dropped if the backlog is full. (Incoming conns are already connected,
// so no connectChan is created for them.)
func gotIncomingConnectionCallback(userdata interface{}, newBaseConn *libutp.Socket) {
    sm := userdata.(*socketManager)
    remoteAddr := sm.udpSocket.RemoteAddr()
    if remoteAddr != nil {
        // this is not a listening-mode socket! we'll reject this spurious packet
        _ = newBaseConn.Close()
        return
    }
    // the new Conn holds its own reference on the manager
    sm.incrementReferences()
    connLogger := sm.logger.WithName("utp-socket").WithValues("dir", "in", "remote-addr", newBaseConn.GetPeerName())
    newUTPConn := &Conn{
        utpSocket: utpSocket{
            localAddr: sm.LocalAddr().(*net.UDPAddr),
            manager:   sm,
        },
        logger:            connLogger,
        baseConn:          newBaseConn,
        closeChan:         make(chan struct{}),
        baseConnDestroyed: make(chan struct{}),
        readBuffer:        buffers.NewSyncBuffer(readBufferSize),
        writeBuffer:       buffers.NewSyncBuffer(writeBufferSize),
    }
    newBaseConn.SetCallbacks(&libutp.CallbackTable{
        OnRead:    onReadCallback,
        OnWrite:   onWriteCallback,
        GetRBSize: getRBSizeCallback,
        OnState:   onStateCallback,
        OnError:   onErrorCallback,
    }, newUTPConn)
    sm.logger.V(1).Info("accepted new connection", "remote-addr", newUTPConn.RemoteAddr())
    select {
    case sm.acceptChan <- newUTPConn:
        // it's the socketManager's problem now
    default:
        sm.logger.Info("dropping new connection because full backlog", "remote-addr", newUTPConn.RemoteAddr())
        // The accept backlog is full; drop this new connection. We can't call
        // (*Conn).Close() from here, because the baseConnLock is already held.
        // Fortunately, most of the steps done there aren't necessary here
        // because we have never exposed this instance to the user.
        _ = newUTPConn.baseConn.Close()
        // This step will decref the socketManager back to where it was before
        // this instance was created.
        _ = newUTPConn.manager.decrementReferences()
        newUTPConn.manager = nil
    }
}
// packetSendCallback is handed to libutp as its outgoing-packet hook; it
// writes one datagram to the manager's UDP socket, recording (but not
// propagating) any send error.
func packetSendCallback(userdata interface{}, buf []byte, addr *net.UDPAddr) {
    manager := userdata.(*socketManager)
    manager.logger.V(10).Info("udp sending bytes", "len", len(buf), "remote-addr", addr.String())
    if _, err := manager.udpSocket.WriteToUDP(buf, addr); err != nil {
        manager.registerSocketError(err)
    }
}
// onReadCallback delivers data received by libutp into the Conn's read
// buffer. The baseConnLock is held when this is called.
func onReadCallback(userdata interface{}, buf []byte) {
    c := userdata.(*Conn)
    c.stateLock.Lock()
    c.stateDebugLogLocked("entering onReadCallback", "got-bytes", len(buf))
    isClosing := c.willClose
    c.stateLock.Unlock()
    if isClosing {
        // the local side has closed the connection; they don't want any additional data
        return
    }
    if ok := c.readBuffer.TryAppend(buf); !ok {
        // I think this should not happen; the flow control mechanism should
        // keep us from getting more data than the (libutp-level) receive
        // buffer can hold.
        used := c.readBuffer.SpaceUsed()
        avail := c.readBuffer.SpaceAvailable()
        // FIX: log the `used` snapshot taken above instead of re-querying
        // SpaceUsed(), so "buffer-size" and "buffer-holds" come from the
        // same observation
        c.logger.Error(nil, "receive buffer overflow", "buffer-size", used+avail, "buffer-holds", used, "new-data", len(buf))
        panic("receive buffer overflow")
    }
    c.stateDebugLog("finishing onReadCallback")
}
// onWriteCallback is invoked by libutp to collect exactly len(buf) bytes
// of pending outgoing data from the Conn's write buffer. The baseConnLock
// is held when this is called.
func onWriteCallback(userdata interface{}, buf []byte) {
    c := userdata.(*Conn)
    c.stateLock.Lock()
    defer c.stateLock.Unlock()
    c.stateDebugLogLocked("entering onWriteCallback", "accepting-bytes", len(buf))
    ok := c.writeBuffer.TryConsumeFull(buf)
    if !ok {
        // I think this should not happen; this callback should only be called
        // with data less than or equal to the number we pass in with
        // libutp.(*Socket).Write(). That gets passed in under the
        // baseConnLock, and this gets called under that same lock, so it also
        // shouldn't be possible for something to pull data from the write
        // buffer between that point and this point.
        panic("send buffer underflow")
    }
    c.stateDebugLogLocked("finishing onWriteCallback")
}
func getRBSizeCallback(userdata interface{}) int {
c := userdata.(*Conn)
return c.readBuffer.SpaceUsed()
}
// onConnectOrWritable handles StateConnect and StateWritable: it unblocks
// a pending dial (by closing connectChan) and tells libutp about any data
// already queued in the write buffer. Called from onStateCallback, so the
// baseConnLock is already held (which makes the baseConn.Write call safe).
func (c *Conn) onConnectOrWritable(state libutp.State) {
    c.stateLock.Lock()
    c.stateDebugLogLocked("entering onConnectOrWritable", "libutp-state", state)
    if c.connecting {
        c.connecting = false
        close(c.connectChan)
    }
    c.stateLock.Unlock()
    if writeAmount := c.writeBuffer.SpaceUsed(); writeAmount > 0 {
        c.logger.V(10).Info("initiating write to libutp layer", "len", writeAmount)
        c.baseConn.Write(writeAmount)
    } else {
        c.logger.V(10).Info("nothing to write")
    }
    c.stateDebugLog("finishing onConnectOrWritable")
}
// onConnectionFailure handles a fatal connection event: remote EOF,
// reset/refusal, or timeout. The baseConnLock is held when this is called
// (acquiring stateLock afterwards matches the documented lock order).
func (c *Conn) onConnectionFailure(err error) {
    c.stateDebugLog("entering onConnectionFailure", "err-text", err.Error())
    // record err (io.EOF for a clean remote close) so that it gets returned
    // from subsequent Read/Write calls
    c.setEncounteredError(err)
    // FIX: remoteIsDone was declared and checked by the read/write paths
    // (stateEnterRead, WriteContext) but never set anywhere; set it here,
    // matching its declared meaning ("closed by the remote side, or the
    // conn experienced a timeout or other fatal error"), so those paths can
    // report the recorded error directly.
    c.stateLock.Lock()
    c.remoteIsDone = true
    c.stateLock.Unlock()
    // clear out write buffer; we won't be able to send it now. If a call
    // to Close() is already waiting, we don't need to make it wait any
    // longer
    c.writeBuffer.Close()
    // this will allow any pending reads to complete (as short reads)
    c.readBuffer.CloseForWrites()
    c.stateDebugLog("finishing onConnectionFailure")
}
// onStateCallback dispatches libutp state transitions to the appropriate
// Conn handler. StateDestroying closes baseConnDestroyed, which (*Conn).Close
// waits on.
//
// the baseConnLock should already be held when this callback is entered
func onStateCallback(userdata interface{}, state libutp.State) {
    c := userdata.(*Conn)
    switch state {
    case libutp.StateConnect, libutp.StateWritable:
        c.onConnectOrWritable(state)
    case libutp.StateEOF:
        // clean remote close: surfaces as io.EOF from Read
        c.onConnectionFailure(io.EOF)
    case libutp.StateDestroying:
        close(c.baseConnDestroyed)
    }
}
// onErrorCallback handles a libutp-level error on the connection.
//
// This could be ECONNRESET, ECONNREFUSED, or ETIMEDOUT.
//
// the baseConnLock should already be held when this callback is entered
func onErrorCallback(userdata interface{}, err error) {
    c := userdata.(*Conn)
    c.logger.Error(err, "onError callback from libutp layer")
    // we have to treat this like a total connection failure
    c.onConnectionFailure(err)
    // and we have to cover a corner case where this error was encountered
    // _during_ the libutp Close() call- in this case, libutp would sit
    // forever and never get to StateDestroying, so we have to prod it again.
    if c.libutpClosed {
        if err := c.baseConn.Close(); err != nil {
            c.logger.Error(err, "error from libutp layer Close()")
        }
    }
}
// ResolveUTPAddr resolves a µTP address on the "utp", "utp4", or "utp6"
// network by resolving the corresponding UDP address.
func ResolveUTPAddr(network, address string) (*Addr, error) {
    if network != "utp" && network != "utp4" && network != "utp6" {
        return nil, net.UnknownNetworkError(network)
    }
    // map "utp*" to the matching "udp*" network name
    udpAddr, err := net.ResolveUDPAddr("udp"+network[3:], address)
    if err != nil {
        return nil, err
    }
    return (*Addr)(udpAddr), nil
}
| essIncomingPacket(dat | identifier_name |
utpgo.go | // Copyright (c) 2021 Storj Labs, Inc.
// Copyright (c) 2010 BitTorrent, Inc.
// See LICENSE for copying information.
package utp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/go-logr/logr"
"storj.io/utp-go/buffers"
"storj.io/utp-go/libutp"
)
// Buffer for data before it gets to µTP (there is another "send buffer" in
// the libutp code, but it is for managing flow control window sizes.
const (
readBufferSize = 200000
writeBufferSize = 200000
)
var noopLogger = logr.DiscardLogger{}
type Addr net.UDPAddr
func (a *Addr) Network() string { return "utp" }
func (a *Addr) String() string { return (*net.UDPAddr)(a).String() }
type Conn struct {
utpSocket
logger logr.Logger
baseConn *libutp.Socket
// set to true if the socket will close once the write buffer is empty
willClose bool
// set to true once the libutp-layer Close has been called
libutpClosed bool
// set to true when the socket has been closed by the remote side (or the
// conn has experienced a timeout or other fatal error)
remoteIsDone bool
// set to true if a read call is pending
readPending bool
// set to true if a write call is pending
writePending bool
// closed when Close() is called
closeChan chan struct{}
// closed when baseConn has entered StateDestroying
baseConnDestroyed chan struct{}
// readBuffer tracks data that has been read on a particular Conn, but
// not yet consumed by the application.
readBuffer *buffers.SyncCircularBuffer
// writeBuffer tracks data that needs to be sent on this Conn, which is
// has not yet been collected by µTP.
writeBuffer *buffers.SyncCircularBuffer
readDeadline time.Time
writeDeadline time.Time
// Set to true while waiting for a connection to complete (got
// state=StateConnect). The connectChan channel will be closed once this
// is set.
connecting bool
connectChan chan struct{}
}
type Listener struct {
utpSocket
acceptChan <-chan *Conn
}
// utpSocket is shared functionality between Conn and Listener.
type utpSocket struct {
localAddr *net.UDPAddr
// manager is shared by all sockets using the same local address
// (for outgoing connections, only the one connection, but for incoming
// connections, this includes all connections received by the associated
// listening socket). It is reference-counted, and thus will only be
// cleaned up entirely when the last related socket is closed.
manager *socketManager
// changes to encounteredError, manager, or other state variables in Conn
// or Listener should all be protected with this lock. If it must be
// acquired at the same time as manager.baseConnLock, the
// manager.baseConnLock must be acquired first.
stateLock sync.Mutex
// Once set, all further Write/Read operations should fail with this error.
encounteredError error
}
func Dial(network, address string) (net.Conn, error) {
return DialOptions(network, address)
}
func DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return DialOptions(network, address, WithContext(ctx))
}
func DialOptions(network, address string, options ...ConnectOption) (net.Conn, error) {
switch network {
case "utp", "utp4", "utp6":
default:
return nil, fmt.Errorf("network %s not supported", network)
}
rAddr, err := ResolveUTPAddr(network, address)
if err != nil {
return nil, err
}
return DialUTPOptions(network, nil, rAddr, options...)
}
func DialUTP(network string, localAddr, remoteAddr *Addr) (net.Conn, error) {
return DialUTPOptions(network, localAddr, remoteAddr)
}
func DialUTPOptions(network string, localAddr, remoteAddr *Addr, options ...ConnectOption) (net.Conn, error) {
s := utpDialState{
logger: &noopLogger,
ctx: context.Background(),
tlsConfig: nil,
}
for _, opt := range options {
opt.apply(&s)
}
conn, err := dial(s.ctx, s.logger, network, localAddr, remoteAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil {
return tls.Client(conn, s.tlsConfig), nil
}
return conn, nil
}
// dial creates a socketManager bound to localAddr, creates the outgoing
// libutp socket, starts the manager's goroutines, initiates the libutp-level
// connect, and then waits (subject to ctx) for the handshake to finish.
func dial(ctx context.Context, logger logr.Logger, network string, localAddr, remoteAddr *Addr) (*Conn, error) {
	managerLogger := logger.WithValues("remote-addr", remoteAddr)
	manager, err := newSocketManager(managerLogger, network, (*net.UDPAddr)(localAddr), (*net.UDPAddr)(remoteAddr))
	if err != nil {
		return nil, err
	}
	localUDPAddr := manager.LocalAddr().(*net.UDPAddr)
	// different from managerLogger in case local addr interface and/or port
	// has been clarified
	connLogger := logger.WithValues("local-addr", localUDPAddr, "remote-addr", remoteAddr, "dir", "out")
	utpConn := &Conn{
		utpSocket: utpSocket{
			localAddr: localUDPAddr,
			manager:   manager,
		},
		logger:            connLogger.WithName("utp-conn"),
		connecting:        true,
		connectChan:       make(chan struct{}),
		closeChan:         make(chan struct{}),
		baseConnDestroyed: make(chan struct{}),
		readBuffer:        buffers.NewSyncBuffer(readBufferSize),
		writeBuffer:       buffers.NewSyncBuffer(writeBufferSize),
	}
	connLogger.V(10).Info("creating outgoing socket")
	// thread-safe here, because no other goroutines could have a handle to
	// this mx yet.
	utpConn.baseConn, err = manager.mx.Create(packetSendCallback, manager, (*net.UDPAddr)(remoteAddr))
	if err != nil {
		return nil, err
	}
	utpConn.baseConn.SetCallbacks(&libutp.CallbackTable{
		OnRead:    onReadCallback,
		OnWrite:   onWriteCallback,
		GetRBSize: getRBSizeCallback,
		OnState:   onStateCallback,
		OnError:   onErrorCallback,
	}, utpConn)
	utpConn.baseConn.SetLogger(connLogger.WithName("utp-socket"))
	manager.start()
	func() {
		// now that the manager's goroutines have started, we do need
		// concurrency protection
		manager.baseConnLock.Lock()
		defer manager.baseConnLock.Unlock()
		connLogger.V(10).Info("initiating libutp-level Connect()")
		utpConn.baseConn.Connect()
	}()
	// block until the connect attempt resolves (connectChan is closed by the
	// state callback) or the caller's context is canceled.
	select {
	case <-ctx.Done():
		_ = utpConn.Close()
		return nil, ctx.Err()
	case <-utpConn.connectChan:
	}
	// connection operation is complete, successful or not; record any error met
	utpConn.stateLock.Lock()
	err = utpConn.encounteredError
	utpConn.stateLock.Unlock()
	if err != nil {
		_ = utpConn.Close()
		return nil, utpConn.makeOpError("dial", err)
	}
	return utpConn, nil
}
// Listen creates a µTP listener on the given local address, with no extra
// options.
func Listen(network string, addr string) (net.Listener, error) {
	return ListenOptions(network, addr)
}
// ListenOptions creates a µTP listener on the given local address, applying
// the supplied ConnectOptions. If a TLS config was supplied via WithTLS, the
// returned listener is wrapped as a TLS listener.
func ListenOptions(network, addr string, options ...ConnectOption) (net.Listener, error) {
	s := utpDialState{
		logger: &noopLogger,
	}
	for _, opt := range options {
		opt.apply(&s)
	}
	switch network {
	case "utp", "utp4", "utp6":
	default:
		return nil, fmt.Errorf("network %s not supported", network)
	}
	udpAddr, err := ResolveUTPAddr(network, addr)
	if err != nil {
		return nil, err
	}
	listener, err := listen(s.logger, network, udpAddr)
	if err != nil {
		return nil, err
	}
	if s.tlsConfig != nil {
		return tls.NewListener(listener, s.tlsConfig), nil
	}
	return listener, nil
}
// ListenUTP creates a µTP listener on the given local address, with no
// logging.
func ListenUTP(network string, localAddr *Addr) (*Listener, error) {
	return listen(&noopLogger, network, localAddr)
}

// ListenUTPOptions creates a µTP listener on the given local address,
// applying the supplied ConnectOptions. Note that only the logger option is
// used here; context and TLS options have no effect on this code path.
func ListenUTPOptions(network string, localAddr *Addr, options ...ConnectOption) (*Listener, error) {
	s := utpDialState{
		logger: &noopLogger,
	}
	for _, opt := range options {
		opt.apply(&s)
	}
	return listen(s.logger, network, localAddr)
}
// listen creates the socketManager for a listening socket, wraps it in a
// Listener sharing the manager's accept channel, and starts the manager's
// goroutines.
func listen(logger logr.Logger, network string, localAddr *Addr) (*Listener, error) {
	manager, err := newSocketManager(logger, network, (*net.UDPAddr)(localAddr), nil)
	if err != nil {
		return nil, err
	}
	udpLocalAddr := manager.LocalAddr().(*net.UDPAddr)
	utpListener := &Listener{
		utpSocket: utpSocket{
			localAddr: udpLocalAddr,
			manager:   manager,
		},
		acceptChan: manager.acceptChan,
	}
	manager.start()
	return utpListener, nil
}
// utpDialState accumulates the settings chosen by ConnectOptions.
type utpDialState struct {
	logger    logr.Logger
	ctx       context.Context
	tlsConfig *tls.Config
}

// ConnectOption is an option that can be passed to DialOptions,
// DialUTPOptions, ListenOptions, or ListenUTPOptions.
type ConnectOption interface {
	apply(s *utpDialState)
}

type optionLogger struct {
	logger logr.Logger
}

func (o *optionLogger) apply(s *utpDialState) {
	s.logger = o.logger
}

// WithLogger makes µTP sockets log through the given logr.Logger.
func WithLogger(logger logr.Logger) ConnectOption {
	return &optionLogger{logger: logger}
}

type optionContext struct {
	ctx context.Context
}

func (o *optionContext) apply(s *utpDialState) {
	s.ctx = o.ctx
}

// WithContext attaches a context that can cancel or time out a dial attempt.
func WithContext(ctx context.Context) ConnectOption {
	return &optionContext{ctx: ctx}
}

type optionTLS struct {
	tlsConfig *tls.Config
}

func (o *optionTLS) apply(s *utpDialState) {
	s.tlsConfig = o.tlsConfig
}

// WithTLS wraps dialed conns as TLS clients and listeners as TLS listeners
// using the given config.
func WithTLS(tlsConfig *tls.Config) ConnectOption {
	return &optionTLS{tlsConfig: tlsConfig}
}
// Close flushes any buffered writes, closes the libutp-level socket, waits
// for it to reach StateDestroying, and releases this conn's reference on the
// shared socketManager. A second call returns an error.
func (c *Conn) Close() error {
	// indicate our desire to close; once buffers are flushed, we can continue
	c.stateLock.Lock()
	if c.willClose {
		c.stateLock.Unlock()
		return errors.New("multiple calls to Close() not allowed")
	}
	c.willClose = true
	c.stateLock.Unlock()
	// wait for write buffer to be flushed
	c.writeBuffer.FlushAndClose()
	// if there are still any blocked reads, shut them down
	c.readBuffer.Close()
	// close baseConn
	err := func() error {
		// yes, even libutp.(*UTPSocket).Close() needs concurrency protection;
		// it may end up invoking callbacks
		c.manager.baseConnLock.Lock()
		defer c.manager.baseConnLock.Unlock()
		c.logger.V(10).Info("closing baseConn")
		// record that the libutp-level Close has been called; onErrorCallback
		// uses this to prod the socket toward StateDestroying if needed.
		c.libutpClosed = true
		return c.baseConn.Close()
	}()
	// wait for socket to enter StateDestroying
	<-c.baseConnDestroyed
	c.setEncounteredError(net.ErrClosed)
	socketCloseErr := c.utpSocket.Close()
	// even if err was already set, this one is likely to be more helpful/interesting.
	if socketCloseErr != nil {
		err = socketCloseErr
	}
	return err
}
// SetLogger replaces the logger used by the libutp-level socket.
func (c *Conn) SetLogger(logger logr.Logger) {
	c.baseConn.SetLogger(logger)
}

// Read implements net.Conn.Read. It honors the read deadline set with
// SetReadDeadline/SetDeadline.
func (c *Conn) Read(buf []byte) (n int, err error) {
	return c.ReadContext(context.Background(), buf)
}
// stateEnterRead checks whether a new read may begin and, if so, marks a
// read as pending. The caller must hold c.stateLock.
func (c *Conn) stateEnterRead() error {
	if c.readPending {
		return buffers.ReaderAlreadyWaitingErr
	}
	if c.willClose {
		return c.makeOpError("read", net.ErrClosed)
	}
	if c.remoteIsDone && c.readBuffer.SpaceUsed() == 0 {
		// remote side is gone and nothing is left to drain
		return c.makeOpError("read", c.encounteredError)
	}
	c.readPending = true
	return nil
}
// ReadContext reads from the connection's receive buffer, blocking until
// some data is available, the context is canceled, the read deadline passes,
// or the conn is closed. Deadline expiry is reported as os.ErrDeadlineExceeded
// wrapped in a *net.OpError, per the net.Conn contract.
func (c *Conn) ReadContext(ctx context.Context, buf []byte) (n int, err error) {
	c.stateLock.Lock()
	// NOTE(review): encounteredError is snapshotted once here; an error that
	// arrives while we are waiting below is expected to surface by the read
	// buffer being closed for writes (short read / EOF) — confirm intended.
	encounteredErr := c.encounteredError
	deadline := c.readDeadline
	err = c.stateEnterRead()
	c.stateLock.Unlock()
	if err != nil {
		return 0, err
	}
	// clear the pending flag no matter how we leave
	defer func() {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		c.readPending = false
	}()
	if !deadline.IsZero() {
		var cancel func()
		ctx, cancel = context.WithDeadline(ctx, deadline)
		defer cancel()
	}
	for {
		var ok bool
		n, ok = c.readBuffer.TryConsume(buf)
		if ok {
			// a successful zero-byte consume means the buffer was closed
			if n == 0 {
				return 0, io.EOF
			}
			return n, nil
		}
		if encounteredErr != nil {
			return 0, c.makeOpError("read", encounteredErr)
		}
		waitChan, cancelWait, err := c.readBuffer.WaitForBytesChan(1)
		if err != nil {
			return 0, err
		}
		select {
		case <-ctx.Done():
			cancelWait()
			err = ctx.Err()
			if errors.Is(err, context.DeadlineExceeded) {
				// transform deadline error to os.ErrDeadlineExceeded as per
				// net.Conn specification
				err = c.makeOpError("read", os.ErrDeadlineExceeded)
			}
			return 0, err
		case <-c.closeChan:
			cancelWait()
			return 0, c.makeOpError("read", net.ErrClosed)
		case <-waitChan:
		}
	}
}
// Write implements net.Conn.Write. It honors the write deadline set with
// SetWriteDeadline/SetDeadline.
func (c *Conn) Write(buf []byte) (n int, err error) {
	return c.WriteContext(context.Background(), buf)
}
// WriteContext appends buf to the connection's write buffer, blocking until
// all of buf is accepted, the context is canceled, the write deadline passes,
// or the connection fails. On success it returns len(buf); the data is then
// delivered asynchronously by the libutp layer. Deadline expiry is reported
// as os.ErrDeadlineExceeded wrapped in a *net.OpError, per net.Conn.
func (c *Conn) WriteContext(ctx context.Context, buf []byte) (n int, err error) {
	c.stateLock.Lock()
	if c.writePending {
		c.stateLock.Unlock()
		return 0, buffers.WriterAlreadyWaitingErr
	}
	c.writePending = true
	deadline := c.writeDeadline
	// Snapshot any connection-fatal error while the lock is held. (Bug fix:
	// previously err was never assigned here, so the error-translation block
	// below was unreachable dead code.)
	err = c.encounteredError
	c.stateLock.Unlock()
	// clear the pending flag no matter how we leave. This defer must be
	// installed before the early return below, or writePending would be
	// left set forever.
	defer func() {
		c.stateLock.Lock()
		defer c.stateLock.Unlock()
		c.writePending = false
	}()
	if err != nil {
		if err == io.EOF {
			// remote side closed connection cleanly, and µTP in/out streams
			// are not independently closeable. Doesn't make sense to return
			// an EOF from a Write method, so..
			err = c.makeOpError("write", syscall.ECONNRESET)
		} else if err == net.ErrClosed {
			err = c.makeOpError("write", net.ErrClosed)
		}
		return 0, err
	}
	if !deadline.IsZero() {
		var cancel func()
		ctx, cancel = context.WithDeadline(ctx, deadline)
		defer cancel()
	}
	for {
		c.stateLock.Lock()
		willClose := c.willClose
		remoteIsDone := c.remoteIsDone
		encounteredError := c.encounteredError
		c.stateLock.Unlock()
		if willClose {
			return 0, c.makeOpError("write", net.ErrClosed)
		}
		if remoteIsDone {
			return 0, c.makeOpError("write", encounteredError)
		}
		if ok := c.writeBuffer.TryAppend(buf); ok {
			// make sure µTP knows about the new bytes. this might be a bit
			// confusing, but it doesn't matter if other writes occur between
			// the TryAppend() above and the acquisition of the baseConnLock
			// below. All that matters is that (a) there is at least one call
			// to baseConn.Write scheduled to be made after this point (without
			// undue blocking); (b) baseConnLock is held when that Write call
			// is made; and (c) the amount of data in the write buffer does not
			// decrease between the SpaceUsed() call and the start of the next
			// call to onWriteCallback.
			func() {
				c.manager.baseConnLock.Lock()
				defer c.manager.baseConnLock.Unlock()
				amount := c.writeBuffer.SpaceUsed()
				c.logger.V(10).Info("informing libutp layer of data for writing", "len", amount)
				c.baseConn.Write(amount)
			}()
			return len(buf), nil
		}
		waitChan, cancelWait, err := c.writeBuffer.WaitForSpaceChan(len(buf))
		if err != nil {
			if err == buffers.IsClosedErr {
				err = c.makeOpError("write", c.encounteredError)
			}
			return 0, err
		}
		// couldn't write the data yet; wait until we can, or until we hit the
		// timeout, or until the conn is closed.
		select {
		case <-ctx.Done():
			cancelWait()
			err = ctx.Err()
			if errors.Is(err, context.DeadlineExceeded) {
				// transform deadline error to os.ErrDeadlineExceeded as per
				// net.Conn specification
				err = c.makeOpError("write", os.ErrDeadlineExceeded)
			}
			return 0, err
		case <-c.closeChan:
			cancelWait()
			return 0, c.makeOpError("write", net.ErrClosed)
		case <-waitChan:
		}
	}
}
// RemoteAddr returns the address of the connected peer.
func (c *Conn) RemoteAddr() net.Addr {
	// GetPeerName is thread-safe
	return (*Addr)(c.baseConn.GetPeerName())
}

// SetReadDeadline sets the deadline applied to future (and pending) Read
// calls. A zero value means no deadline.
func (c *Conn) SetReadDeadline(t time.Time) error {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.readDeadline = t
	return nil
}

// SetWriteDeadline sets the deadline applied to future (and pending) Write
// calls. A zero value means no deadline.
func (c *Conn) SetWriteDeadline(t time.Time) error {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.writeDeadline = t
	return nil
}

// SetDeadline sets both the read and write deadlines.
func (c *Conn) SetDeadline(t time.Time) error {
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.writeDeadline = t
	c.readDeadline = t
	return nil
}
// makeOpError builds a *net.OpError for this conn, with the local address as
// Source and the remote address as Addr (the embedded utpSocket version only
// knows the local address, so it is shuffled here).
func (c *Conn) makeOpError(op string, err error) error {
	opErr := c.utpSocket.makeOpError(op, err).(*net.OpError)
	opErr.Source = opErr.Addr
	opErr.Addr = c.RemoteAddr()
	return opErr
}

// Conn satisfies the net.Conn interface.
var _ net.Conn = &Conn{}
// AcceptUTPContext accepts the next incoming µTP connection, blocking until
// one arrives, the listener is closed, or ctx is canceled.
func (l *Listener) AcceptUTPContext(ctx context.Context) (*Conn, error) {
	select {
	case newConn, ok := <-l.acceptChan:
		if ok {
			return newConn, nil
		}
		// acceptChan is closed: the socketManager has shut down. Read the
		// recorded error under stateLock; per the utpSocket locking rules it
		// may be written concurrently, so the previous lockless read was a
		// data race.
		l.stateLock.Lock()
		err := l.encounteredError
		l.stateLock.Unlock()
		if err == nil {
			err = l.makeOpError("accept", net.ErrClosed)
		}
		return nil, err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// AcceptUTP accepts the next incoming µTP connection.
func (l *Listener) AcceptUTP() (*Conn, error) {
	return l.AcceptUTPContext(context.Background())
}

// Accept implements net.Listener.Accept.
func (l *Listener) Accept() (net.Conn, error) {
	return l.AcceptUTP()
}

// AcceptContext is like Accept, but can be canceled via ctx.
func (l *Listener) AcceptContext(ctx context.Context) (net.Conn, error) {
	return l.AcceptUTPContext(ctx)
}

// Close implements net.Listener.Close, releasing this listener's reference
// on the shared socketManager.
func (l *Listener) Close() error {
	return l.utpSocket.Close()
}

// Addr implements net.Listener.Addr, returning the local µTP address.
func (l *Listener) Addr() net.Addr {
	return l.utpSocket.LocalAddr()
}

// Listener satisfies the net.Listener interface.
var _ net.Listener = &Listener{}
// makeOpError wraps err in a *net.OpError for operation op on this socket.
// Only the local address is known at this level; Conn.makeOpError fills in
// the remote side.
func (u *utpSocket) makeOpError(op string, err error) error {
	opErr := &net.OpError{
		Op:   op,
		Net:  "utp",
		Addr: u.LocalAddr(),
		Err:  err,
	}
	return opErr
}
// Close releases this socket's reference on the shared socketManager. It is
// safe to call more than once; only the first call decrements the refcount.
func (u *utpSocket) Close() error {
	u.stateLock.Lock()
	defer u.stateLock.Unlock()
	if u.manager == nil {
		return nil
	}
	err := u.manager.decrementReferences()
	u.manager = nil
	return err
}
func (c *Conn) setEncounteredError(err error) {
if err == nil {
return
}
c.stateLock.Lock()
defer c.stateLock.Unlock()
// keep the first error if this is called multiple times
if c.encounteredError == nil {
c.encounteredError = err
}
if c.connecting {
c.connecting = false | func (u *utpSocket) LocalAddr() net.Addr {
return (*Addr)(u.localAddr)
}
// socketManager owns a UDP socket and the libutp multiplexer on top of it.
// It is shared (reference-counted) by every Conn and Listener using the same
// local UDP address.
type socketManager struct {
	// mx demultiplexes incoming UDP packets to libutp sockets.
	mx        *libutp.SocketMultiplexer
	logger    logr.Logger
	// udpSocket is the underlying OS-level UDP socket.
	udpSocket *net.UDPConn
	// this lock should be held when invoking any libutp functions or methods
	// that are not thread-safe or which themselves might invoke callbacks
	// (that is, nearly all libutp functions or methods). It can be assumed
	// that this lock is held in callbacks.
	baseConnLock sync.Mutex
	// refCountLock guards refCount.
	refCountLock sync.Mutex
	refCount     int
	// cancelManagement is a cancel function that should be called to close
	// down the socket management goroutines. The main managing goroutine
	// should clean up and return any close error on closeErr.
	cancelManagement func()
	// closeErr is a channel on which the managing goroutine will return any
	// errors from a close operation when all is complete.
	closeErr chan error
	// to be allocated with a buffer the size of the intended backlog. There
	// can be at most one utpSocket able to receive on this channel (one
	// Listener for any given UDP socket).
	acceptChan chan *Conn
	// just a way to accumulate errors in sending or receiving on the UDP
	// socket; this may cause future Write/Read method calls to return the
	// error in the future
	socketErrors     []error
	socketErrorsLock sync.Mutex
	// pollInterval is how often the management goroutine runs libutp timeout
	// checks.
	pollInterval time.Duration
}

const (
	// defaultUTPConnBacklogSize is the accept backlog: how many fully
	// established inbound conns may be queued before new ones are dropped.
	defaultUTPConnBacklogSize = 5
)
// newSocketManager validates the network name, binds a UDP socket on
// localAddr, and builds the (not yet started) socketManager around it.
// remoteAddr is used only to shape the error for a failed dial vs listen.
func newSocketManager(logger logr.Logger, network string, localAddr, remoteAddr *net.UDPAddr) (*socketManager, error) {
	switch network {
	case "utp", "utp4", "utp6":
	default:
		op := "dial"
		if remoteAddr == nil {
			op = "listen"
		}
		return nil, &net.OpError{Op: op, Net: network, Source: localAddr, Addr: remoteAddr, Err: net.UnknownNetworkError(network)}
	}
	// map "utp*" to the corresponding "udp*" network name
	udpNetwork := "udp" + network[3:]
	// thread-safe here; don't need baseConnLock
	mx := libutp.NewSocketMultiplexer(logger.WithName("mx").WithValues("local-addr", localAddr.String()), nil)
	udpSocket, err := net.ListenUDP(udpNetwork, localAddr)
	if err != nil {
		return nil, err
	}
	sm := &socketManager{
		mx:           mx,
		logger:       logger.WithName("manager").WithValues("local-addr", udpSocket.LocalAddr()),
		udpSocket:    udpSocket,
		// the caller holds the first reference
		refCount:     1,
		closeErr:     make(chan error),
		acceptChan:   make(chan *Conn, defaultUTPConnBacklogSize),
		pollInterval: 5 * time.Millisecond,
	}
	return sm, nil
}
// start launches the manager's two goroutines (timeout management and UDP
// receive), labeled for pprof. They run until cancelManagement is called.
func (sm *socketManager) start() {
	ctx, cancel := context.WithCancel(context.Background())
	sm.cancelManagement = cancel
	managementLabels := pprof.Labels(
		"name", "socket-management", "udp-socket", sm.udpSocket.LocalAddr().String())
	receiverLabels := pprof.Labels(
		"name", "udp-receiver", "udp-socket", sm.udpSocket.LocalAddr().String())
	go func() {
		pprof.Do(ctx, managementLabels, sm.socketManagement)
	}()
	go func() {
		pprof.Do(ctx, receiverLabels, sm.udpMessageReceiver)
	}()
}

// LocalAddr returns the bound address of the underlying UDP socket.
func (sm *socketManager) LocalAddr() net.Addr {
	return sm.udpSocket.LocalAddr()
}
// socketManagement periodically runs libutp timeout checks until ctx is
// canceled, then performs the final teardown via internalClose.
func (sm *socketManager) socketManagement(ctx context.Context) {
	timer := time.NewTimer(sm.pollInterval)
	defer timer.Stop()
	for {
		timer.Reset(sm.pollInterval)
		select {
		case <-ctx.Done():
			// at this point, all attached Conn instances should be
			// closed already
			sm.internalClose()
			return
		case <-timer.C:
		}
		sm.checkTimeouts()
	}
}

// processIncomingPacket feeds one received UDP datagram to the libutp
// multiplexer, under baseConnLock (the call may invoke callbacks).
func (sm *socketManager) processIncomingPacket(data []byte, destAddr *net.UDPAddr) {
	sm.baseConnLock.Lock()
	defer sm.baseConnLock.Unlock()
	sm.mx.IsIncomingUTP(gotIncomingConnectionCallback, packetSendCallback, sm, data, destAddr)
}

// checkTimeouts runs the libutp timeout machinery, under baseConnLock.
func (sm *socketManager) checkTimeouts() {
	sm.baseConnLock.Lock()
	defer sm.baseConnLock.Unlock()
	sm.mx.CheckTimeouts()
}
// internalClose tears down the UDP socket and reports the close error on
// closeErr (decrementReferences is waiting for it). Closing acceptChan
// unblocks any pending Accept call.
func (sm *socketManager) internalClose() {
	err := sm.udpSocket.Close()
	sm.mx = nil
	sm.closeErr <- err
	close(sm.closeErr)
	close(sm.acceptChan)
}

// incrementReferences adds one reference to the shared manager (one per
// attached Conn or Listener).
func (sm *socketManager) incrementReferences() {
	sm.refCountLock.Lock()
	sm.refCount++
	sm.refCountLock.Unlock()
}
// decrementReferences drops one reference; when the count reaches zero it
// shuts down the management goroutines and blocks until internalClose has
// finished, returning the UDP socket's close error (if any).
func (sm *socketManager) decrementReferences() error {
	sm.refCountLock.Lock()
	defer sm.refCountLock.Unlock()
	sm.refCount--
	if sm.refCount == 0 {
		sm.logger.V(1).Info("closing socketManager")
		sm.cancelManagement()
		// wait for internalClose to report the result
		return <-sm.closeErr
	}
	if sm.refCount < 0 {
		return errors.New("socketManager closed too many times")
	}
	return nil
}
// udpMessageReceiver loops reading datagrams from the UDP socket and passing
// them to the libutp layer, until the socket is closed. Truncated datagrams
// are dropped rather than forwarded.
func (sm *socketManager) udpMessageReceiver(ctx context.Context) {
	// thread-safe; don't need baseConnLock for GetUDPMTU
	bufSize := libutp.GetUDPMTU(sm.LocalAddr().(*net.UDPAddr))
	// It turns out GetUDPMTU is frequently wrong, and when it gives us a lower
	// number than the real MTU, and the other side is sending bigger packets,
	// then we end up not being able to read the full packets. Start with a
	// receive buffer twice as big as we thought we might need, and increase it
	// further from there if needed.
	bufSize *= 2
	sm.logger.V(0).Info("udp message receiver started", "receive-buf-size", bufSize, "local-addr", sm.LocalAddr())
	b := make([]byte, bufSize)
	for {
		n, _, flags, addr, err := sm.udpSocket.ReadMsgUDP(b, nil)
		if err != nil {
			if ctx.Err() != nil {
				// we expect an error here; the socket has been closed; it's fine
				return
			}
			// transient error: record it and keep receiving
			sm.registerSocketError(err)
			continue
		}
		if flags & syscall.MSG_TRUNC != 0 {
			// we didn't get the whole packet. don't pass it on to µTP; it
			// won't recognize the truncation and will pretend like that's
			// all the data there is. let the packet loss detection stuff
			// do its part instead.
			continue
		}
		sm.logger.V(10).Info("udp received bytes", "len", n, "remote-addr", addr)
		sm.processIncomingPacket(b[:n], addr)
	}
}
// registerSocketError logs and accumulates a UDP send/receive error for
// later surfacing.
func (sm *socketManager) registerSocketError(err error) {
	sm.socketErrorsLock.Lock()
	defer sm.socketErrorsLock.Unlock()
	sm.logger.Error(err, "socket error")
	sm.socketErrors = append(sm.socketErrors, err)
}
// gotIncomingConnectionCallback is invoked by the libutp layer (with
// baseConnLock already held) when a new inbound connection has been
// established. It wraps the libutp socket in a Conn and queues it on the
// accept channel, or drops it if the backlog is full.
func gotIncomingConnectionCallback(userdata interface{}, newBaseConn *libutp.Socket) {
	sm := userdata.(*socketManager)
	remoteAddr := sm.udpSocket.RemoteAddr()
	if remoteAddr != nil {
		// this is not a listening-mode socket! we'll reject this spurious packet
		_ = newBaseConn.Close()
		return
	}
	// the new Conn holds its own reference on the manager
	sm.incrementReferences()
	connLogger := sm.logger.WithName("utp-socket").WithValues("dir", "in", "remote-addr", newBaseConn.GetPeerName())
	newUTPConn := &Conn{
		utpSocket: utpSocket{
			localAddr: sm.LocalAddr().(*net.UDPAddr),
			manager:   sm,
		},
		logger:            connLogger,
		baseConn:          newBaseConn,
		closeChan:         make(chan struct{}),
		baseConnDestroyed: make(chan struct{}),
		readBuffer:        buffers.NewSyncBuffer(readBufferSize),
		writeBuffer:       buffers.NewSyncBuffer(writeBufferSize),
	}
	newBaseConn.SetCallbacks(&libutp.CallbackTable{
		OnRead:    onReadCallback,
		OnWrite:   onWriteCallback,
		GetRBSize: getRBSizeCallback,
		OnState:   onStateCallback,
		OnError:   onErrorCallback,
	}, newUTPConn)
	sm.logger.V(1).Info("accepted new connection", "remote-addr", newUTPConn.RemoteAddr())
	select {
	case sm.acceptChan <- newUTPConn:
		// it's the socketManager's problem now
	default:
		sm.logger.Info("dropping new connection because full backlog", "remote-addr", newUTPConn.RemoteAddr())
		// The accept backlog is full; drop this new connection. We can't call
		// (*Conn).Close() from here, because the baseConnLock is already held.
		// Fortunately, most of the steps done there aren't necessary here
		// because we have never exposed this instance to the user.
		_ = newUTPConn.baseConn.Close()
		// This step will decref the socketManager back to where it was before
		// this instance was created.
		_ = newUTPConn.manager.decrementReferences()
		newUTPConn.manager = nil
	}
}
// packetSendCallback is invoked by the libutp layer when it has a datagram
// ready to go out on the wire. Send errors are recorded, not returned.
func packetSendCallback(userdata interface{}, buf []byte, addr *net.UDPAddr) {
	sm := userdata.(*socketManager)
	sm.logger.V(10).Info("udp sending bytes", "len", len(buf), "remote-addr", addr.String())
	_, err := sm.udpSocket.WriteToUDP(buf, addr)
	if err != nil {
		sm.registerSocketError(err)
	}
}
// onReadCallback is invoked by the libutp layer (with baseConnLock held)
// when ordered payload data has arrived for this conn. It moves the data
// into the conn's read buffer.
func onReadCallback(userdata interface{}, buf []byte) {
	c := userdata.(*Conn)
	c.stateLock.Lock()
	c.stateDebugLogLocked("entering onReadCallback", "got-bytes", len(buf))
	isClosing := c.willClose
	c.stateLock.Unlock()
	if isClosing {
		// the local side has closed the connection; they don't want any additional data
		return
	}
	if ok := c.readBuffer.TryAppend(buf); !ok {
		// I think this should not happen; the flow control mechanism should
		// keep us from getting more data than the (libutp-level) receive
		// buffer can hold.
		used := c.readBuffer.SpaceUsed()
		avail := c.readBuffer.SpaceAvailable()
		// log the snapshotted 'used' value rather than re-calling SpaceUsed,
		// so "buffer-size" and "buffer-holds" are mutually consistent.
		c.logger.Error(nil, "receive buffer overflow", "buffer-size", used+avail, "buffer-holds", used, "new-data", len(buf))
		panic("receive buffer overflow")
	}
	c.stateDebugLog("finishing onReadCallback")
}
// onWriteCallback is invoked by the libutp layer (with baseConnLock held)
// when it is ready to pull len(buf) bytes out of the conn's write buffer
// for transmission.
func onWriteCallback(userdata interface{}, buf []byte) {
	c := userdata.(*Conn)
	c.stateLock.Lock()
	defer c.stateLock.Unlock()
	c.stateDebugLogLocked("entering onWriteCallback", "accepting-bytes", len(buf))
	ok := c.writeBuffer.TryConsumeFull(buf)
	if !ok {
		// I think this should not happen; this callback should only be called
		// with data less than or equal to the number we pass in with
		// libutp.(*Socket).Write(). That gets passed in under the
		// baseConnLock, and this gets called under that same lock, so it also
		// shouldn't be possible for something to pull data from the write
		// buffer between that point and this point.
		panic("send buffer underflow")
	}
	c.stateDebugLogLocked("finishing onWriteCallback")
}

// getRBSizeCallback reports to libutp how many received bytes are currently
// buffered but not yet consumed by the application (for flow control).
func getRBSizeCallback(userdata interface{}) int {
	c := userdata.(*Conn)
	return c.readBuffer.SpaceUsed()
}
// onConnectOrWritable handles libutp StateConnect/StateWritable: it completes
// a pending connect (closing connectChan) and informs libutp of any data
// already waiting in the write buffer. Called with baseConnLock held, so the
// baseConn.Write call below is safe.
func (c *Conn) onConnectOrWritable(state libutp.State) {
	c.stateLock.Lock()
	c.stateDebugLogLocked("entering onConnectOrWritable", "libutp-state", state)
	if c.connecting {
		c.connecting = false
		close(c.connectChan)
	}
	c.stateLock.Unlock()
	if writeAmount := c.writeBuffer.SpaceUsed(); writeAmount > 0 {
		c.logger.V(10).Info("initiating write to libutp layer", "len", writeAmount)
		c.baseConn.Write(writeAmount)
	} else {
		c.logger.V(10).Info("nothing to write")
	}
	c.stateDebugLog("finishing onConnectOrWritable")
}

// onConnectionFailure handles a fatal end-of-connection condition (remote
// EOF, timeout, reset): it records err as the conn's encountered error and
// shuts down the buffers so pending reads/writes can complete.
func (c *Conn) onConnectionFailure(err error) {
	c.stateDebugLog("entering onConnectionFailure", "err-text", err.Error())
	// mark EOF as encountered error, so that it gets returned from
	// subsequent Read calls
	c.setEncounteredError(err)
	// clear out write buffer; we won't be able to send it now. If a call
	// to Close() is already waiting, we don't need to make it wait any
	// longer
	c.writeBuffer.Close()
	// this will allow any pending reads to complete (as short reads)
	c.readBuffer.CloseForWrites()
	c.stateDebugLog("finishing onConnectionFailure")
}
// onStateCallback dispatches libutp state transitions to the appropriate
// handler. Closing baseConnDestroyed on StateDestroying unblocks (*Conn).Close.
//
// the baseConnLock should already be held when this callback is entered
func onStateCallback(userdata interface{}, state libutp.State) {
	c := userdata.(*Conn)
	switch state {
	case libutp.StateConnect, libutp.StateWritable:
		c.onConnectOrWritable(state)
	case libutp.StateEOF:
		// remote side closed cleanly; treat as connection end with io.EOF
		c.onConnectionFailure(io.EOF)
	case libutp.StateDestroying:
		close(c.baseConnDestroyed)
	}
}

// onErrorCallback handles a fatal libutp-level error for this conn.
// This could be ECONNRESET, ECONNREFUSED, or ETIMEDOUT.
//
// the baseConnLock should already be held when this callback is entered
func onErrorCallback(userdata interface{}, err error) {
	c := userdata.(*Conn)
	c.logger.Error(err, "onError callback from libutp layer")
	// we have to treat this like a total connection failure
	c.onConnectionFailure(err)
	// and we have to cover a corner case where this error was encountered
	// _during_ the libutp Close() call- in this case, libutp would sit
	// forever and never get to StateDestroying, so we have to prod it again.
	if c.libutpClosed {
		if err := c.baseConn.Close(); err != nil {
			c.logger.Error(err, "error from libutp layer Close()")
		}
	}
}
func ResolveUTPAddr(network, address string) (*Addr, error) {
switch network {
case "utp", "utp4", "utp6":
udpNetwork := "udp" + network[3:]
udpAddr, err := net.ResolveUDPAddr(udpNetwork, address)
if err != nil {
return nil, err
}
return (*Addr)(udpAddr), nil
}
return nil, net.UnknownNetworkError(network)
} | close(c.connectChan)
}
}
| random_line_split |
utpgo.go | // Copyright (c) 2021 Storj Labs, Inc.
// Copyright (c) 2010 BitTorrent, Inc.
// See LICENSE for copying information.
package utp
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"os"
"runtime/pprof"
"sync"
"syscall"
"time"
"github.com/go-logr/logr"
"storj.io/utp-go/buffers"
"storj.io/utp-go/libutp"
)
// Buffer for data before it gets to µTP (there is another "send buffer" in
// the libutp code, but it is for managing flow control window sizes.
const (
readBufferSize = 200000
writeBufferSize = 200000
)
var noopLogger = logr.DiscardLogger{}
type Addr net.UDPAddr
func (a *Addr) Network() string { return "utp" }
func (a *Addr) String() string { return (*net.UDPAddr)(a).String() }
type Conn struct {
utpSocket
logger logr.Logger
baseConn *libutp.Socket
// set to true if the socket will close once the write buffer is empty
willClose bool
// set to true once the libutp-layer Close has been called
libutpClosed bool
// set to true when the socket has been closed by the remote side (or the
// conn has experienced a timeout or other fatal error)
remoteIsDone bool
// set to true if a read call is pending
readPending bool
// set to true if a write call is pending
writePending bool
// closed when Close() is called
closeChan chan struct{}
// closed when baseConn has entered StateDestroying
baseConnDestroyed chan struct{}
// readBuffer tracks data that has been read on a particular Conn, but
// not yet consumed by the application.
readBuffer *buffers.SyncCircularBuffer
// writeBuffer tracks data that needs to be sent on this Conn, which is
// has not yet been collected by µTP.
writeBuffer *buffers.SyncCircularBuffer
readDeadline time.Time
writeDeadline time.Time
// Set to true while waiting for a connection to complete (got
// state=StateConnect). The connectChan channel will be closed once this
// is set.
connecting bool
connectChan chan struct{}
}
type Listener struct {
utpSocket
acceptChan <-chan *Conn
}
// utpSocket is shared functionality between Conn and Listener.
type utpSocket struct {
localAddr *net.UDPAddr
// manager is shared by all sockets using the same local address
// (for outgoing connections, only the one connection, but for incoming
// connections, this includes all connections received by the associated
// listening socket). It is reference-counted, and thus will only be
// cleaned up entirely when the last related socket is closed.
manager *socketManager
// changes to encounteredError, manager, or other state variables in Conn
// or Listener should all be protected with this lock. If it must be
// acquired at the same time as manager.baseConnLock, the
// manager.baseConnLock must be acquired first.
stateLock sync.Mutex
// Once set, all further Write/Read operations should fail with this error.
encounteredError error
}
func Dial(network, address string) (net.Conn, error) {
return DialOptions(network, address)
}
func DialContext(ctx context.Context, network, address string) (net.Conn, error) {
return DialOptions(network, address, WithContext(ctx))
}
func DialOptions(network, address string, options ...ConnectOption) (net.Conn, error) {
switch network {
case "utp", "utp4", "utp6":
default:
return nil, fmt.Errorf("network %s not supported", network)
}
rAddr, err := ResolveUTPAddr(network, address)
if err != nil {
return nil, err
}
return DialUTPOptions(network, nil, rAddr, options...)
}
func DialUTP(network string, localAddr, remoteAddr *Addr) (net.Conn, error) {
return DialUTPOptions(network, localAddr, remoteAddr)
}
func DialUTPOptions(network string, localAddr, remoteAddr *Addr, options ...ConnectOption) (net.Conn, error) {
s := utpDialState{
logger: &noopLogger,
ctx: context.Background(),
tlsConfig: nil,
}
for _, opt := range options {
opt.apply(&s)
}
conn, err := dial(s.ctx, s.logger, network, localAddr, remoteAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil {
return tls.Client(conn, s.tlsConfig), nil
}
return conn, nil
}
func dial(ctx context.Context, logger logr.Logger, network string, localAddr, remoteAddr *Addr) (*Conn, error) {
managerLogger := logger.WithValues("remote-addr", remoteAddr)
manager, err := newSocketManager(managerLogger, network, (*net.UDPAddr)(localAddr), (*net.UDPAddr)(remoteAddr))
if err != nil {
return nil, err
}
localUDPAddr := manager.LocalAddr().(*net.UDPAddr)
// different from managerLogger in case local addr interface and/or port
// has been clarified
connLogger := logger.WithValues("local-addr", localUDPAddr, "remote-addr", remoteAddr, "dir", "out")
utpConn := &Conn{
utpSocket: utpSocket{
localAddr: localUDPAddr,
manager: manager,
},
logger: connLogger.WithName("utp-conn"),
connecting: true,
connectChan: make(chan struct{}),
closeChan: make(chan struct{}),
baseConnDestroyed: make(chan struct{}),
readBuffer: buffers.NewSyncBuffer(readBufferSize),
writeBuffer: buffers.NewSyncBuffer(writeBufferSize),
}
connLogger.V(10).Info("creating outgoing socket")
// thread-safe here, because no other goroutines could have a handle to
// this mx yet.
utpConn.baseConn, err = manager.mx.Create(packetSendCallback, manager, (*net.UDPAddr)(remoteAddr))
if err != nil {
return nil, err
}
utpConn.baseConn.SetCallbacks(&libutp.CallbackTable{
OnRead: onReadCallback,
OnWrite: onWriteCallback,
GetRBSize: getRBSizeCallback,
OnState: onStateCallback,
OnError: onErrorCallback,
}, utpConn)
utpConn.baseConn.SetLogger(connLogger.WithName("utp-socket"))
manager.start()
func() {
// now that the manager's goroutines have started, we do need
// concurrency protection
manager.baseConnLock.Lock()
defer manager.baseConnLock.Unlock()
connLogger.V(10).Info("initiating libutp-level Connect()")
utpConn.baseConn.Connect()
}()
select {
case <-ctx.Done():
_ = utpConn.Close()
return nil, ctx.Err()
case <-utpConn.connectChan:
}
// connection operation is complete, successful or not; record any error met
utpConn.stateLock.Lock()
err = utpConn.encounteredError
utpConn.stateLock.Unlock()
if err != nil {
_ = utpConn.Close()
return nil, utpConn.makeOpError("dial", err)
}
return utpConn, nil
}
func Listen(network string, addr string) (net.Listener, error) {
return ListenOptions(network, addr)
}
func ListenOptions(network, addr string, options ...ConnectOption) (net.Listener, error) {
s := utpDialState{
logger: &noopLogger,
}
for _, opt := range options {
opt.apply(&s)
}
switch network {
case "utp", "utp4", "utp6":
default:
return nil, fmt.Errorf("network %s not supported", network)
}
udpAddr, err := ResolveUTPAddr(network, addr)
if err != nil {
return nil, err
}
listener, err := listen(s.logger, network, udpAddr)
if err != nil {
return nil, err
}
if s.tlsConfig != nil {
return tls.NewListener(listener, s.tlsConfig), nil
}
return listener, nil
}
func ListenUTP(network string, localAddr *Addr) (*Listener, error) {
return listen(&noopLogger, network, localAddr)
}
func ListenUTPOptions(network string, localAddr *Addr, options ...ConnectOption) (*Listener, error) {
s := utpDialState{
logger: &noopLogger,
}
for _, opt := range options {
opt.apply(&s)
}
return listen(s.logger, network, localAddr)
}
func listen(logger logr.Logger, network string, localAddr *Addr) (*Listener, error) {
manager, err := newSocketManager(logger, network, (*net.UDPAddr)(localAddr), nil)
if err != nil {
return nil, err
}
udpLocalAddr := manager.LocalAddr().(*net.UDPAddr)
utpListener := &Listener{
utpSocket: utpSocket{
localAddr: udpLocalAddr,
manager: manager,
},
acceptChan: manager.acceptChan,
}
manager.start()
return utpListener, nil
}
type utpDialState struct {
logger logr.Logger
ctx context.Context
tlsConfig *tls.Config
}
type ConnectOption interface {
apply(s *utpDialState)
}
type optionLogger struct {
logger logr.Logger
}
func (o *optionLogger) apply(s *utpDialState) {
s.logger = o.logger
}
func WithLogger(logger logr.Logger) ConnectOption {
return &optionLogger{logger: logger}
}
type optionContext struct {
ctx context.Context
}
func (o *optionContext) apply(s *utpDialState) {
s.ctx = o.ctx
}
func WithContext(ctx context.Context) ConnectOption {
return &optionContext{ctx: ctx}
}
type optionTLS struct {
tlsConfig *tls.Config
}
func (o *optionTLS) apply(s *utpDialState) {
s.tlsConfig = o.tlsConfig
}
func WithTLS(tlsConfig *tls.Config) ConnectOption {
return &optionTLS{tlsConfig: tlsConfig}
}
func (c *Conn) Close() error {
// indicate our desire to close; once buffers are flushed, we can continue
c.stateLock.Lock()
if c.willClose {
c.stateLock.Unlock()
return errors.New("multiple calls to Close() not allowed")
}
c.willClose = true
c.stateLock.Unlock()
// wait for write buffer to be flushed
c.writeBuffer.FlushAndClose()
// if there are still any blocked reads, shut them down
c.readBuffer.Close()
// close baseConn
err := func() error {
// yes, even libutp.(*UTPSocket).Close() needs concurrency protection;
// it may end up invoking callbacks
c.manager.baseConnLock.Lock()
defer c.manager.baseConnLock.Unlock()
c.logger.V(10).Info("closing baseConn")
c.libutpClosed = true
return c.baseConn.Close()
}()
// wait for socket to enter StateDestroying
<-c.baseConnDestroyed
c.setEncounteredError(net.ErrClosed)
socketCloseErr := c.utpSocket.Close()
// even if err was already set, this one is likely to be more helpful/interesting.
if socketCloseErr != nil {
err = socketCloseErr
}
return err
}
func (c *Conn) SetLogger(logger logr.Logger) {
c.baseConn.SetLogger(logger)
}
func (c *Conn) Read(buf []byte) (n int, err error) {
return c.ReadContext(context.Background(), buf)
}
func (c *Conn) stateEnterRead() error {
switch {
case c.readPending:
return buffers.ReaderAlreadyWaitingErr
case c.willClose:
return c.makeOpError("read", net.ErrClosed)
case c.remoteIsDone && c.readBuffer.SpaceUsed() == 0:
return c.makeOpError("read", c.encounteredError)
}
c.readPending = true
return nil
}
func (c *Conn) ReadContext(ctx context.Context, buf []byte) (n int, err error) {
c.stateLock.Lock()
encounteredErr := c.encounteredError
deadline := c.readDeadline
err = c.stateEnterRead()
c.stateLock.Unlock()
if err != nil {
return 0, err
}
defer func() {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.readPending = false
}()
if !deadline.IsZero() {
var cancel func()
ctx, cancel = context.WithDeadline(ctx, deadline)
defer cancel()
}
for {
var ok bool
n, ok = c.readBuffer.TryConsume(buf)
if ok {
if n == 0 {
return 0, io.EOF
}
return n, nil
}
if encounteredErr != nil {
return 0, c.makeOpError("read", encounteredErr)
}
waitChan, cancelWait, err := c.readBuffer.WaitForBytesChan(1)
if err != nil {
return 0, err
}
select {
case <-ctx.Done():
cancelWait()
err = ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// transform deadline error to os.ErrDeadlineExceeded as per
// net.Conn specification
err = c.makeOpError("read", os.ErrDeadlineExceeded)
}
return 0, err
case <-c.closeChan:
cancelWait()
return 0, c.makeOpError("read", net.ErrClosed)
case <-waitChan:
}
}
}
func (c *Conn) Write(buf []byte) (n int, err error) {
| func (c *Conn) WriteContext(ctx context.Context, buf []byte) (n int, err error) {
c.stateLock.Lock()
if c.writePending {
c.stateLock.Unlock()
return 0, buffers.WriterAlreadyWaitingErr
}
c.writePending = true
deadline := c.writeDeadline
c.stateLock.Unlock()
if err != nil {
if err == io.EOF {
// remote side closed connection cleanly, and µTP in/out streams
// are not independently closeable. Doesn't make sense to return
// an EOF from a Write method, so..
err = c.makeOpError("write", syscall.ECONNRESET)
} else if err == net.ErrClosed {
err = c.makeOpError("write", net.ErrClosed)
}
return 0, err
}
defer func() {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writePending = false
}()
if !deadline.IsZero() {
var cancel func()
ctx, cancel = context.WithDeadline(ctx, deadline)
defer cancel()
}
for {
c.stateLock.Lock()
willClose := c.willClose
remoteIsDone := c.remoteIsDone
encounteredError := c.encounteredError
c.stateLock.Unlock()
if willClose {
return 0, c.makeOpError("write", net.ErrClosed)
}
if remoteIsDone {
return 0, c.makeOpError("write", encounteredError)
}
if ok := c.writeBuffer.TryAppend(buf); ok {
// make sure µTP knows about the new bytes. this might be a bit
// confusing, but it doesn't matter if other writes occur between
// the TryAppend() above and the acquisition of the baseConnLock
// below. All that matters is that (a) there is at least one call
// to baseConn.Write scheduled to be made after this point (without
// undue blocking); (b) baseConnLock is held when that Write call
// is made; and (c) the amount of data in the write buffer does not
// decrease between the SpaceUsed() call and the start of the next
// call to onWriteCallback.
func() {
c.manager.baseConnLock.Lock()
defer c.manager.baseConnLock.Unlock()
amount := c.writeBuffer.SpaceUsed()
c.logger.V(10).Info("informing libutp layer of data for writing", "len", amount)
c.baseConn.Write(amount)
}()
return len(buf), nil
}
waitChan, cancelWait, err := c.writeBuffer.WaitForSpaceChan(len(buf))
if err != nil {
if err == buffers.IsClosedErr {
err = c.makeOpError("write", c.encounteredError)
}
return 0, err
}
// couldn't write the data yet; wait until we can, or until we hit the
// timeout, or until the conn is closed.
select {
case <-ctx.Done():
cancelWait()
err = ctx.Err()
if errors.Is(err, context.DeadlineExceeded) {
// transform deadline error to os.ErrDeadlineExceeded as per
// net.Conn specification
err = c.makeOpError("write", os.ErrDeadlineExceeded)
}
return 0, err
case <-c.closeChan:
cancelWait()
return 0, c.makeOpError("write", net.ErrClosed)
case <-waitChan:
}
}
}
func (c *Conn) RemoteAddr() net.Addr {
// GetPeerName is thread-safe
return (*Addr)(c.baseConn.GetPeerName())
}
func (c *Conn) SetReadDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.readDeadline = t
return nil
}
func (c *Conn) SetWriteDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writeDeadline = t
return nil
}
func (c *Conn) SetDeadline(t time.Time) error {
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.writeDeadline = t
c.readDeadline = t
return nil
}
func (c *Conn) makeOpError(op string, err error) error {
opErr := c.utpSocket.makeOpError(op, err).(*net.OpError)
opErr.Source = opErr.Addr
opErr.Addr = c.RemoteAddr()
return opErr
}
var _ net.Conn = &Conn{}
func (l *Listener) AcceptUTPContext(ctx context.Context) (*Conn, error) {
select {
case newConn, ok := <-l.acceptChan:
if ok {
return newConn, nil
}
err := l.encounteredError
if err == nil {
err = l.makeOpError("accept", net.ErrClosed)
}
return nil, err
case <-ctx.Done():
return nil, ctx.Err()
}
}
func (l *Listener) AcceptUTP() (*Conn, error) {
return l.AcceptUTPContext(context.Background())
}
func (l *Listener) Accept() (net.Conn, error) {
return l.AcceptUTP()
}
func (l *Listener) AcceptContext(ctx context.Context) (net.Conn, error) {
return l.AcceptUTPContext(ctx)
}
func (l *Listener) Close() error {
return l.utpSocket.Close()
}
func (l *Listener) Addr() net.Addr {
return l.utpSocket.LocalAddr()
}
var _ net.Listener = &Listener{}
func (u *utpSocket) makeOpError(op string, err error) error {
return &net.OpError{
Op: op,
Net: "utp",
Source: nil,
Addr: u.LocalAddr(),
Err: err,
}
}
func (u *utpSocket) Close() (err error) {
u.stateLock.Lock()
if u.manager != nil {
err = u.manager.decrementReferences()
u.manager = nil
}
u.stateLock.Unlock()
return err
}
func (c *Conn) setEncounteredError(err error) {
if err == nil {
return
}
c.stateLock.Lock()
defer c.stateLock.Unlock()
// keep the first error if this is called multiple times
if c.encounteredError == nil {
c.encounteredError = err
}
if c.connecting {
c.connecting = false
close(c.connectChan)
}
}
func (u *utpSocket) LocalAddr() net.Addr {
return (*Addr)(u.localAddr)
}
type socketManager struct {
mx *libutp.SocketMultiplexer
logger logr.Logger
udpSocket *net.UDPConn
// this lock should be held when invoking any libutp functions or methods
// that are not thread-safe or which themselves might invoke callbacks
// (that is, nearly all libutp functions or methods). It can be assumed
// that this lock is held in callbacks.
baseConnLock sync.Mutex
refCountLock sync.Mutex
refCount int
// cancelManagement is a cancel function that should be called to close
// down the socket management goroutines. The main managing goroutine
// should clean up and return any close error on closeErr.
cancelManagement func()
// closeErr is a channel on which the managing goroutine will return any
// errors from a close operation when all is complete.
closeErr chan error
// to be allocated with a buffer the size of the intended backlog. There
// can be at most one utpSocket able to receive on this channel (one
// Listener for any given UDP socket).
acceptChan chan *Conn
// just a way to accumulate errors in sending or receiving on the UDP
// socket; this may cause future Write/Read method calls to return the
// error in the future
socketErrors []error
socketErrorsLock sync.Mutex
pollInterval time.Duration
}
const (
defaultUTPConnBacklogSize = 5
)
func newSocketManager(logger logr.Logger, network string, localAddr, remoteAddr *net.UDPAddr) (*socketManager, error) {
switch network {
case "utp", "utp4", "utp6":
default:
op := "dial"
if remoteAddr == nil {
op = "listen"
}
return nil, &net.OpError{Op: op, Net: network, Source: localAddr, Addr: remoteAddr, Err: net.UnknownNetworkError(network)}
}
udpNetwork := "udp" + network[3:]
// thread-safe here; don't need baseConnLock
mx := libutp.NewSocketMultiplexer(logger.WithName("mx").WithValues("local-addr", localAddr.String()), nil)
udpSocket, err := net.ListenUDP(udpNetwork, localAddr)
if err != nil {
return nil, err
}
sm := &socketManager{
mx: mx,
logger: logger.WithName("manager").WithValues("local-addr", udpSocket.LocalAddr()),
udpSocket: udpSocket,
refCount: 1,
closeErr: make(chan error),
acceptChan: make(chan *Conn, defaultUTPConnBacklogSize),
pollInterval: 5 * time.Millisecond,
}
return sm, nil
}
func (sm *socketManager) start() {
ctx, cancel := context.WithCancel(context.Background())
sm.cancelManagement = cancel
managementLabels := pprof.Labels(
"name", "socket-management", "udp-socket", sm.udpSocket.LocalAddr().String())
receiverLabels := pprof.Labels(
"name", "udp-receiver", "udp-socket", sm.udpSocket.LocalAddr().String())
go func() {
pprof.Do(ctx, managementLabels, sm.socketManagement)
}()
go func() {
pprof.Do(ctx, receiverLabels, sm.udpMessageReceiver)
}()
}
func (sm *socketManager) LocalAddr() net.Addr {
return sm.udpSocket.LocalAddr()
}
func (sm *socketManager) socketManagement(ctx context.Context) {
timer := time.NewTimer(sm.pollInterval)
defer timer.Stop()
for {
timer.Reset(sm.pollInterval)
select {
case <-ctx.Done():
// at this point, all attached Conn instances should be
// closed already
sm.internalClose()
return
case <-timer.C:
}
sm.checkTimeouts()
}
}
func (sm *socketManager) processIncomingPacket(data []byte, destAddr *net.UDPAddr) {
sm.baseConnLock.Lock()
defer sm.baseConnLock.Unlock()
sm.mx.IsIncomingUTP(gotIncomingConnectionCallback, packetSendCallback, sm, data, destAddr)
}
func (sm *socketManager) checkTimeouts() {
sm.baseConnLock.Lock()
defer sm.baseConnLock.Unlock()
sm.mx.CheckTimeouts()
}
func (sm *socketManager) internalClose() {
err := sm.udpSocket.Close()
sm.mx = nil
sm.closeErr <- err
close(sm.closeErr)
close(sm.acceptChan)
}
func (sm *socketManager) incrementReferences() {
sm.refCountLock.Lock()
sm.refCount++
sm.refCountLock.Unlock()
}
func (sm *socketManager) decrementReferences() error {
sm.refCountLock.Lock()
defer sm.refCountLock.Unlock()
sm.refCount--
if sm.refCount == 0 {
sm.logger.V(1).Info("closing socketManager")
sm.cancelManagement()
return <-sm.closeErr
}
if sm.refCount < 0 {
return errors.New("socketManager closed too many times")
}
return nil
}
func (sm *socketManager) udpMessageReceiver(ctx context.Context) {
// thread-safe; don't need baseConnLock for GetUDPMTU
bufSize := libutp.GetUDPMTU(sm.LocalAddr().(*net.UDPAddr))
// It turns out GetUDPMTU is frequently wrong, and when it gives us a lower
// number than the real MTU, and the other side is sending bigger packets,
// then we end up not being able to read the full packets. Start with a
// receive buffer twice as big as we thought we might need, and increase it
// further from there if needed.
bufSize *= 2
sm.logger.V(0).Info("udp message receiver started", "receive-buf-size", bufSize, "local-addr", sm.LocalAddr())
b := make([]byte, bufSize)
for {
n, _, flags, addr, err := sm.udpSocket.ReadMsgUDP(b, nil)
if err != nil {
if ctx.Err() != nil {
// we expect an error here; the socket has been closed; it's fine
return
}
sm.registerSocketError(err)
continue
}
if flags & syscall.MSG_TRUNC != 0 {
// we didn't get the whole packet. don't pass it on to µTP; it
// won't recognize the truncation and will pretend like that's
// all the data there is. let the packet loss detection stuff
// do its part instead.
continue
}
sm.logger.V(10).Info("udp received bytes", "len", n, "remote-addr", addr)
sm.processIncomingPacket(b[:n], addr)
}
}
func (sm *socketManager) registerSocketError(err error) {
sm.socketErrorsLock.Lock()
defer sm.socketErrorsLock.Unlock()
sm.logger.Error(err, "socket error")
sm.socketErrors = append(sm.socketErrors, err)
}
func gotIncomingConnectionCallback(userdata interface{}, newBaseConn *libutp.Socket) {
sm := userdata.(*socketManager)
remoteAddr := sm.udpSocket.RemoteAddr()
if remoteAddr != nil {
// this is not a listening-mode socket! we'll reject this spurious packet
_ = newBaseConn.Close()
return
}
sm.incrementReferences()
connLogger := sm.logger.WithName("utp-socket").WithValues("dir", "in", "remote-addr", newBaseConn.GetPeerName())
newUTPConn := &Conn{
utpSocket: utpSocket{
localAddr: sm.LocalAddr().(*net.UDPAddr),
manager: sm,
},
logger: connLogger,
baseConn: newBaseConn,
closeChan: make(chan struct{}),
baseConnDestroyed: make(chan struct{}),
readBuffer: buffers.NewSyncBuffer(readBufferSize),
writeBuffer: buffers.NewSyncBuffer(writeBufferSize),
}
newBaseConn.SetCallbacks(&libutp.CallbackTable{
OnRead: onReadCallback,
OnWrite: onWriteCallback,
GetRBSize: getRBSizeCallback,
OnState: onStateCallback,
OnError: onErrorCallback,
}, newUTPConn)
sm.logger.V(1).Info("accepted new connection", "remote-addr", newUTPConn.RemoteAddr())
select {
case sm.acceptChan <- newUTPConn:
// it's the socketManager's problem now
default:
sm.logger.Info("dropping new connection because full backlog", "remote-addr", newUTPConn.RemoteAddr())
// The accept backlog is full; drop this new connection. We can't call
// (*Conn).Close() from here, because the baseConnLock is already held.
// Fortunately, most of the steps done there aren't necessary here
// because we have never exposed this instance to the user.
_ = newUTPConn.baseConn.Close()
// This step will decref the socketManager back to where it was before
// this instance was created.
_ = newUTPConn.manager.decrementReferences()
newUTPConn.manager = nil
}
}
func packetSendCallback(userdata interface{}, buf []byte, addr *net.UDPAddr) {
sm := userdata.(*socketManager)
sm.logger.V(10).Info("udp sending bytes", "len", len(buf), "remote-addr", addr.String())
_, err := sm.udpSocket.WriteToUDP(buf, addr)
if err != nil {
sm.registerSocketError(err)
}
}
func onReadCallback(userdata interface{}, buf []byte) {
c := userdata.(*Conn)
c.stateLock.Lock()
c.stateDebugLogLocked("entering onReadCallback", "got-bytes", len(buf))
isClosing := c.willClose
c.stateLock.Unlock()
if isClosing {
// the local side has closed the connection; they don't want any additional data
return
}
if ok := c.readBuffer.TryAppend(buf); !ok {
// I think this should not happen; the flow control mechanism should
// keep us from getting more data than the (libutp-level) receive
// buffer can hold.
used := c.readBuffer.SpaceUsed()
avail := c.readBuffer.SpaceAvailable()
c.logger.Error(nil, "receive buffer overflow", "buffer-size", used+avail, "buffer-holds", c.readBuffer.SpaceUsed(), "new-data", len(buf))
panic("receive buffer overflow")
}
c.stateDebugLog("finishing onReadCallback")
}
func onWriteCallback(userdata interface{}, buf []byte) {
c := userdata.(*Conn)
c.stateLock.Lock()
defer c.stateLock.Unlock()
c.stateDebugLogLocked("entering onWriteCallback", "accepting-bytes", len(buf))
ok := c.writeBuffer.TryConsumeFull(buf)
if !ok {
// I think this should not happen; this callback should only be called
// with data less than or equal to the number we pass in with
// libutp.(*Socket).Write(). That gets passed in under the
// baseConnLock, and this gets called under that same lock, so it also
// shouldn't be possible for something to pull data from the write
// buffer between that point and this point.
panic("send buffer underflow")
}
c.stateDebugLogLocked("finishing onWriteCallback")
}
func getRBSizeCallback(userdata interface{}) int {
c := userdata.(*Conn)
return c.readBuffer.SpaceUsed()
}
func (c *Conn) onConnectOrWritable(state libutp.State) {
c.stateLock.Lock()
c.stateDebugLogLocked("entering onConnectOrWritable", "libutp-state", state)
if c.connecting {
c.connecting = false
close(c.connectChan)
}
c.stateLock.Unlock()
if writeAmount := c.writeBuffer.SpaceUsed(); writeAmount > 0 {
c.logger.V(10).Info("initiating write to libutp layer", "len", writeAmount)
c.baseConn.Write(writeAmount)
} else {
c.logger.V(10).Info("nothing to write")
}
c.stateDebugLog("finishing onConnectOrWritable")
}
func (c *Conn) onConnectionFailure(err error) {
c.stateDebugLog("entering onConnectionFailure", "err-text", err.Error())
// mark EOF as encountered error, so that it gets returned from
// subsequent Read calls
c.setEncounteredError(err)
// clear out write buffer; we won't be able to send it now. If a call
// to Close() is already waiting, we don't need to make it wait any
// longer
c.writeBuffer.Close()
// this will allow any pending reads to complete (as short reads)
c.readBuffer.CloseForWrites()
c.stateDebugLog("finishing onConnectionFailure")
}
// the baseConnLock should already be held when this callback is entered
func onStateCallback(userdata interface{}, state libutp.State) {
c := userdata.(*Conn)
switch state {
case libutp.StateConnect, libutp.StateWritable:
c.onConnectOrWritable(state)
case libutp.StateEOF:
c.onConnectionFailure(io.EOF)
case libutp.StateDestroying:
close(c.baseConnDestroyed)
}
}
// This could be ECONNRESET, ECONNREFUSED, or ETIMEDOUT.
//
// the baseConnLock should already be held when this callback is entered
func onErrorCallback(userdata interface{}, err error) {
c := userdata.(*Conn)
c.logger.Error(err, "onError callback from libutp layer")
// we have to treat this like a total connection failure
c.onConnectionFailure(err)
// and we have to cover a corner case where this error was encountered
// _during_ the libutp Close() call- in this case, libutp would sit
// forever and never get to StateDestroying, so we have to prod it again.
if c.libutpClosed {
if err := c.baseConn.Close(); err != nil {
c.logger.Error(err, "error from libutp layer Close()")
}
}
}
func ResolveUTPAddr(network, address string) (*Addr, error) {
switch network {
case "utp", "utp4", "utp6":
udpNetwork := "udp" + network[3:]
udpAddr, err := net.ResolveUDPAddr(udpNetwork, address)
if err != nil {
return nil, err
}
return (*Addr)(udpAddr), nil
}
return nil, net.UnknownNetworkError(network)
}
| return c.WriteContext(context.Background(), buf)
}
| identifier_body |
chain_spec.rs | // Copyright 2018-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate chain configurations.
use grandpa_primitives::AuthorityId as GrandpaId;
use hex_literal::hex;
use node_runtime::constants::currency::*;
use node_runtime::Block;
use node_runtime::{
AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig,
IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig,
WASM_BINARY,
};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use sc_chain_spec::ChainSpecExtension;
use sc_service::Properties;
use sc_telemetry::TelemetryEndpoints;
use serde::{Deserialize, Serialize};
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
use sp_runtime::{
traits::{IdentifyAccount, Verify},
Perbill,
};
pub use node_primitives::{AccountId, Balance, Signature};
pub use node_runtime::GenesisConfig;
type AccountPublic = <Signature as Verify>::Signer;
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
pub struct Extensions {
/// Block numbers with known hashes.
pub fork_blocks: sc_client::ForkBlocks<Block>,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>;
/// IceFrog testnet generator
pub fn icefrog_testnet_config() -> Result<ChainSpec, String> {
ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..])
}
fn session_keys(
grandpa: GrandpaId,
babe: BabeId,
im_online: ImOnlineId,
authority_discovery: AuthorityDiscoveryId,
) -> SessionKeys {
SessionKeys {
grandpa,
babe,
im_online,
authority_discovery,
}
}
/// Helper function to generate a crypto pair from seed
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Helper function to generate stash, controller and session key from seed
pub fn get_authority_keys_from_seed(
seed: &str,
) -> (
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
) {
(
get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<GrandpaId>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<ImOnlineId>(seed),
get_from_seed::<AuthorityDiscoveryId>(seed),
)
}
/// Helper function to create GenesisConfig for darwinia
/// is_testnet: under test net we will use Alice & Bob as seed to generate keys,
/// but in production enviroment, these accounts will use preset keys
pub fn darwinia_genesis(
initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
enable_println: bool,
is_testnet: bool,
) -> GenesisConfig {
let eth_relay_authorities: Vec<AccountId> = if is_testnet {
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
]
} else {
vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1]
};
const RING_ENDOWMENT: Balance = 20_000_000 * COIN;
const KTON_ENDOWMENT: Balance = 10 * COIN;
const STASH: Balance = 1000 * COIN;
GenesisConfig {
frame_system: Some(SystemConfig {
code: WASM_BINARY.to_vec(),
changes_trie_config: Default::default(),
}),
pallet_indices: Some(IndicesConfig {
ids: endowed_accounts
.iter()
.cloned()
.chain(initial_authorities.iter().map(|x| x.0.clone()))
.collect::<Vec<_>>(),
}),
pallet_session: Some(SessionConfig {
keys: initial_authorities
.iter()
.map(|x| {
(
x.0.clone(),
session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
)
})
.collect::<Vec<_>>(),
}),
// pallet_democracy: Some(DemocracyConfig::default()),
// pallet_collective_Instance1: Some(CouncilConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
// pallet_collective_Instance2: Some(TechnicalCommitteeConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
pallet_contracts: Some(ContractsConfig {
current_schedule: pallet_contracts::Schedule {
enable_println, // this should only be enabled on development chains
..Default::default()
},
gas_price: 1 * MILLI,
}),
pallet_sudo: Some(SudoConfig { key: root_key }),
pallet_babe: Some(BabeConfig { authorities: vec![] }),
pallet_im_online: Some(ImOnlineConfig { keys: vec![] }),
pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }),
pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }),
// pallet_membership_Instance1: Some(Default::default()),
// pallet_treasury: Some(Default::default()),
pallet_ring: Some(BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, RING_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_kton: Some(KtonConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, KTON_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_staking: Some(StakingConfig {
current_era: 0,
validator_count: initial_authorities.len() as u32 * 2,
minimum_validator_count: initial_authorities.len() as u32,
stakers: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator))
.collect(),
invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(),
slash_reward_fraction: Perbill::from_percent(10),
..Default::default()
}),
}
}
/// Staging testnet config.
pub fn staging_testnet_config() -> ChainSpec {
fn staging_testnet_config_genesis() -> GenesisConfig {
// stash, controller, session-key
// generated with secret:
// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
// and
// for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done
let initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)> = vec![
(
// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(),
// 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq
hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(),
// 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC
hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
),
(
// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(),
// 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF
hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(),
// 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE
hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
),
(
// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(),
// 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(),
// 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d
hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
),
(
// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(),
// 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(),
// 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4
hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
),
];
// generated with secret: subkey inspect "$secret"/fir
let root_key: AccountId = hex![
// 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo
"9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809"
]
.into();
let endowed_accounts: Vec<AccountId> = vec![root_key.clone()];
darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true)
}
let boot_nodes = vec![];
ChainSpec::from_genesis(
"Staging Testnet",
"staging_testnet",
staging_testnet_config_genesis,
boot_nodes,
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
None,
None,
Default::default(),
)
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec {
fn development_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![get_authority_keys_from_seed("Alice")],
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Development",
"dev",
development_config_genesis,
vec![],
None,
None,
None,
Default::default(),
)
}
/// IceFrog local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec |
/// IceFrog testnet config generator
pub fn gen_icefrog_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
(
hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash
hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(),
get_from_seed::<GrandpaId>("Alice"),
get_from_seed::<BabeId>("Alice"),
get_from_seed::<ImOnlineId>("Alice"),
get_from_seed::<AuthorityDiscoveryId>("Alice"),
),
(
hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash
hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(),
get_from_seed::<GrandpaId>("Bob"),
get_from_seed::<BabeId>("Bob"),
get_from_seed::<ImOnlineId>("Bob"),
get_from_seed::<AuthorityDiscoveryId>("Bob"),
),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
false,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
| {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
get_authority_keys_from_seed("Alice"),
get_authority_keys_from_seed("Bob"),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
} | identifier_body |
chain_spec.rs | // Copyright 2018-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate chain configurations.
use grandpa_primitives::AuthorityId as GrandpaId;
use hex_literal::hex;
use node_runtime::constants::currency::*;
use node_runtime::Block;
use node_runtime::{
AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig,
IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig,
WASM_BINARY,
};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use sc_chain_spec::ChainSpecExtension;
use sc_service::Properties;
use sc_telemetry::TelemetryEndpoints;
use serde::{Deserialize, Serialize};
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
use sp_runtime::{
traits::{IdentifyAccount, Verify},
Perbill,
};
pub use node_primitives::{AccountId, Balance, Signature};
pub use node_runtime::GenesisConfig;
type AccountPublic = <Signature as Verify>::Signer;
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
pub struct Extensions {
/// Block numbers with known hashes.
pub fork_blocks: sc_client::ForkBlocks<Block>,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>;
/// IceFrog testnet generator
pub fn icefrog_testnet_config() -> Result<ChainSpec, String> {
ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..])
}
fn session_keys(
grandpa: GrandpaId,
babe: BabeId,
im_online: ImOnlineId,
authority_discovery: AuthorityDiscoveryId,
) -> SessionKeys {
SessionKeys {
grandpa,
babe,
im_online,
authority_discovery,
}
}
/// Helper function to generate a crypto pair from seed
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Helper function to generate stash, controller and session key from seed
pub fn get_authority_keys_from_seed(
seed: &str,
) -> (
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
) {
(
get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<GrandpaId>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<ImOnlineId>(seed),
get_from_seed::<AuthorityDiscoveryId>(seed),
)
}
/// Helper function to create GenesisConfig for darwinia
/// is_testnet: under test net we will use Alice & Bob as seed to generate keys,
/// but in production enviroment, these accounts will use preset keys
pub fn darwinia_genesis(
initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
enable_println: bool,
is_testnet: bool,
) -> GenesisConfig {
let eth_relay_authorities: Vec<AccountId> = if is_testnet {
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
]
} else | ;
const RING_ENDOWMENT: Balance = 20_000_000 * COIN;
const KTON_ENDOWMENT: Balance = 10 * COIN;
const STASH: Balance = 1000 * COIN;
GenesisConfig {
frame_system: Some(SystemConfig {
code: WASM_BINARY.to_vec(),
changes_trie_config: Default::default(),
}),
pallet_indices: Some(IndicesConfig {
ids: endowed_accounts
.iter()
.cloned()
.chain(initial_authorities.iter().map(|x| x.0.clone()))
.collect::<Vec<_>>(),
}),
pallet_session: Some(SessionConfig {
keys: initial_authorities
.iter()
.map(|x| {
(
x.0.clone(),
session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
)
})
.collect::<Vec<_>>(),
}),
// pallet_democracy: Some(DemocracyConfig::default()),
// pallet_collective_Instance1: Some(CouncilConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
// pallet_collective_Instance2: Some(TechnicalCommitteeConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
pallet_contracts: Some(ContractsConfig {
current_schedule: pallet_contracts::Schedule {
enable_println, // this should only be enabled on development chains
..Default::default()
},
gas_price: 1 * MILLI,
}),
pallet_sudo: Some(SudoConfig { key: root_key }),
pallet_babe: Some(BabeConfig { authorities: vec![] }),
pallet_im_online: Some(ImOnlineConfig { keys: vec![] }),
pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }),
pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }),
// pallet_membership_Instance1: Some(Default::default()),
// pallet_treasury: Some(Default::default()),
pallet_ring: Some(BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, RING_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_kton: Some(KtonConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, KTON_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_staking: Some(StakingConfig {
current_era: 0,
validator_count: initial_authorities.len() as u32 * 2,
minimum_validator_count: initial_authorities.len() as u32,
stakers: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator))
.collect(),
invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(),
slash_reward_fraction: Perbill::from_percent(10),
..Default::default()
}),
}
}
/// Staging testnet config.
pub fn staging_testnet_config() -> ChainSpec {
fn staging_testnet_config_genesis() -> GenesisConfig {
// stash, controller, session-key
// generated with secret:
// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
// and
// for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done
let initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)> = vec![
(
// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(),
// 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq
hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(),
// 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC
hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
),
(
// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(),
// 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF
hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(),
// 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE
hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
),
(
// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(),
// 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(),
// 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d
hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
),
(
// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(),
// 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(),
// 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4
hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
),
];
// generated with secret: subkey inspect "$secret"/fir
let root_key: AccountId = hex![
// 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo
"9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809"
]
.into();
let endowed_accounts: Vec<AccountId> = vec![root_key.clone()];
darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true)
}
let boot_nodes = vec![];
ChainSpec::from_genesis(
"Staging Testnet",
"staging_testnet",
staging_testnet_config_genesis,
boot_nodes,
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
None,
None,
Default::default(),
)
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec {
fn development_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![get_authority_keys_from_seed("Alice")],
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Development",
"dev",
development_config_genesis,
vec![],
None,
None,
None,
Default::default(),
)
}
/// IceFrog local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
get_authority_keys_from_seed("Alice"),
get_authority_keys_from_seed("Bob"),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
/// IceFrog testnet config generator
pub fn gen_icefrog_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
(
hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash
hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(),
get_from_seed::<GrandpaId>("Alice"),
get_from_seed::<BabeId>("Alice"),
get_from_seed::<ImOnlineId>("Alice"),
get_from_seed::<AuthorityDiscoveryId>("Alice"),
),
(
hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash
hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(),
get_from_seed::<GrandpaId>("Bob"),
get_from_seed::<BabeId>("Bob"),
get_from_seed::<ImOnlineId>("Bob"),
get_from_seed::<AuthorityDiscoveryId>("Bob"),
),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
false,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
| {
vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1]
} | conditional_block |
chain_spec.rs | // Copyright 2018-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate chain configurations.
use grandpa_primitives::AuthorityId as GrandpaId;
use hex_literal::hex;
use node_runtime::constants::currency::*;
use node_runtime::Block;
use node_runtime::{
AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig,
IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig,
WASM_BINARY,
};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use sc_chain_spec::ChainSpecExtension;
use sc_service::Properties;
use sc_telemetry::TelemetryEndpoints;
use serde::{Deserialize, Serialize};
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
use sp_runtime::{
traits::{IdentifyAccount, Verify},
Perbill,
};
pub use node_primitives::{AccountId, Balance, Signature};
pub use node_runtime::GenesisConfig;
type AccountPublic = <Signature as Verify>::Signer;
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
pub struct Extensions {
/// Block numbers with known hashes.
pub fork_blocks: sc_client::ForkBlocks<Block>,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>;
/// IceFrog testnet generator
pub fn icefrog_testnet_config() -> Result<ChainSpec, String> {
ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..])
}
fn session_keys(
grandpa: GrandpaId,
babe: BabeId,
im_online: ImOnlineId,
authority_discovery: AuthorityDiscoveryId,
) -> SessionKeys {
SessionKeys {
grandpa,
babe,
im_online,
authority_discovery,
}
}
/// Helper function to generate a crypto pair from seed
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Helper function to generate stash, controller and session key from seed
pub fn get_authority_keys_from_seed(
seed: &str,
) -> (
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
) {
(
get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<GrandpaId>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<ImOnlineId>(seed),
get_from_seed::<AuthorityDiscoveryId>(seed),
)
}
/// Helper function to create GenesisConfig for darwinia
/// is_testnet: under test net we will use Alice & Bob as seed to generate keys,
/// but in production enviroment, these accounts will use preset keys
pub fn darwinia_genesis(
initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
enable_println: bool,
is_testnet: bool,
) -> GenesisConfig {
let eth_relay_authorities: Vec<AccountId> = if is_testnet {
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
]
} else {
vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1]
};
const RING_ENDOWMENT: Balance = 20_000_000 * COIN;
const KTON_ENDOWMENT: Balance = 10 * COIN;
const STASH: Balance = 1000 * COIN;
GenesisConfig {
frame_system: Some(SystemConfig {
code: WASM_BINARY.to_vec(),
changes_trie_config: Default::default(),
}),
pallet_indices: Some(IndicesConfig {
ids: endowed_accounts
.iter()
.cloned()
.chain(initial_authorities.iter().map(|x| x.0.clone()))
.collect::<Vec<_>>(),
}),
pallet_session: Some(SessionConfig {
keys: initial_authorities
.iter()
.map(|x| {
(
x.0.clone(),
session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
)
})
.collect::<Vec<_>>(),
}),
// pallet_democracy: Some(DemocracyConfig::default()),
// pallet_collective_Instance1: Some(CouncilConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
// pallet_collective_Instance2: Some(TechnicalCommitteeConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
pallet_contracts: Some(ContractsConfig {
current_schedule: pallet_contracts::Schedule {
enable_println, // this should only be enabled on development chains
..Default::default()
},
gas_price: 1 * MILLI,
}),
pallet_sudo: Some(SudoConfig { key: root_key }),
pallet_babe: Some(BabeConfig { authorities: vec![] }),
pallet_im_online: Some(ImOnlineConfig { keys: vec![] }),
pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }),
pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }),
// pallet_membership_Instance1: Some(Default::default()),
// pallet_treasury: Some(Default::default()),
pallet_ring: Some(BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, RING_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_kton: Some(KtonConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, KTON_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}), | stakers: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator))
.collect(),
invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(),
slash_reward_fraction: Perbill::from_percent(10),
..Default::default()
}),
}
}
/// Staging testnet config.
pub fn staging_testnet_config() -> ChainSpec {
fn staging_testnet_config_genesis() -> GenesisConfig {
// stash, controller, session-key
// generated with secret:
// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
// and
// for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done
let initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)> = vec![
(
// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(),
// 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq
hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(),
// 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC
hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
),
(
// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(),
// 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF
hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(),
// 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE
hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
),
(
// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(),
// 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(),
// 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d
hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
),
(
// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(),
// 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(),
// 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4
hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
),
];
// generated with secret: subkey inspect "$secret"/fir
let root_key: AccountId = hex![
// 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo
"9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809"
]
.into();
let endowed_accounts: Vec<AccountId> = vec![root_key.clone()];
darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true)
}
let boot_nodes = vec![];
ChainSpec::from_genesis(
"Staging Testnet",
"staging_testnet",
staging_testnet_config_genesis,
boot_nodes,
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
None,
None,
Default::default(),
)
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec {
fn development_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![get_authority_keys_from_seed("Alice")],
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Development",
"dev",
development_config_genesis,
vec![],
None,
None,
None,
Default::default(),
)
}
/// IceFrog local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
get_authority_keys_from_seed("Alice"),
get_authority_keys_from_seed("Bob"),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
/// IceFrog testnet config generator
pub fn gen_icefrog_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
(
hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash
hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(),
get_from_seed::<GrandpaId>("Alice"),
get_from_seed::<BabeId>("Alice"),
get_from_seed::<ImOnlineId>("Alice"),
get_from_seed::<AuthorityDiscoveryId>("Alice"),
),
(
hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash
hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(),
get_from_seed::<GrandpaId>("Bob"),
get_from_seed::<BabeId>("Bob"),
get_from_seed::<ImOnlineId>("Bob"),
get_from_seed::<AuthorityDiscoveryId>("Bob"),
),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
false,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
} | pallet_staking: Some(StakingConfig {
current_era: 0,
validator_count: initial_authorities.len() as u32 * 2,
minimum_validator_count: initial_authorities.len() as u32, | random_line_split |
chain_spec.rs | // Copyright 2018-2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate chain configurations.
use grandpa_primitives::AuthorityId as GrandpaId;
use hex_literal::hex;
use node_runtime::constants::currency::*;
use node_runtime::Block;
use node_runtime::{
AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, GrandpaConfig, ImOnlineConfig,
IndicesConfig, KtonConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, SudoConfig, SystemConfig,
WASM_BINARY,
};
use pallet_im_online::sr25519::AuthorityId as ImOnlineId;
use sc_chain_spec::ChainSpecExtension;
use sc_service::Properties;
use sc_telemetry::TelemetryEndpoints;
use serde::{Deserialize, Serialize};
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
use sp_consensus_babe::AuthorityId as BabeId;
use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public};
use sp_runtime::{
traits::{IdentifyAccount, Verify},
Perbill,
};
pub use node_primitives::{AccountId, Balance, Signature};
pub use node_runtime::GenesisConfig;
type AccountPublic = <Signature as Verify>::Signer;
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Node `ChainSpec` extensions.
///
/// Additional parameters for some Substrate core modules,
/// customizable from the chain spec.
#[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)]
pub struct Extensions {
/// Block numbers with known hashes.
pub fork_blocks: sc_client::ForkBlocks<Block>,
}
/// Specialized `ChainSpec`.
pub type ChainSpec = sc_service::ChainSpec<GenesisConfig, Extensions>;
/// IceFrog testnet generator
pub fn icefrog_testnet_config() -> Result<ChainSpec, String> {
ChainSpec::from_json_bytes(&include_bytes!("../res/icefrog.json")[..])
}
fn session_keys(
grandpa: GrandpaId,
babe: BabeId,
im_online: ImOnlineId,
authority_discovery: AuthorityDiscoveryId,
) -> SessionKeys {
SessionKeys {
grandpa,
babe,
im_online,
authority_discovery,
}
}
/// Helper function to generate a crypto pair from seed
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
/// Helper function to generate an account ID from seed
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Helper function to generate stash, controller and session key from seed
pub fn get_authority_keys_from_seed(
seed: &str,
) -> (
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
) {
(
get_account_id_from_seed::<sr25519::Public>(&format!("{}//stash", seed)),
get_account_id_from_seed::<sr25519::Public>(seed),
get_from_seed::<GrandpaId>(seed),
get_from_seed::<BabeId>(seed),
get_from_seed::<ImOnlineId>(seed),
get_from_seed::<AuthorityDiscoveryId>(seed),
)
}
/// Helper function to create GenesisConfig for darwinia
/// is_testnet: under test net we will use Alice & Bob as seed to generate keys,
/// but in production enviroment, these accounts will use preset keys
pub fn darwinia_genesis(
initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
enable_println: bool,
is_testnet: bool,
) -> GenesisConfig {
let eth_relay_authorities: Vec<AccountId> = if is_testnet {
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
]
} else {
vec![initial_authorities[0].clone().1, initial_authorities[1].clone().1]
};
const RING_ENDOWMENT: Balance = 20_000_000 * COIN;
const KTON_ENDOWMENT: Balance = 10 * COIN;
const STASH: Balance = 1000 * COIN;
GenesisConfig {
frame_system: Some(SystemConfig {
code: WASM_BINARY.to_vec(),
changes_trie_config: Default::default(),
}),
pallet_indices: Some(IndicesConfig {
ids: endowed_accounts
.iter()
.cloned()
.chain(initial_authorities.iter().map(|x| x.0.clone()))
.collect::<Vec<_>>(),
}),
pallet_session: Some(SessionConfig {
keys: initial_authorities
.iter()
.map(|x| {
(
x.0.clone(),
session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
)
})
.collect::<Vec<_>>(),
}),
// pallet_democracy: Some(DemocracyConfig::default()),
// pallet_collective_Instance1: Some(CouncilConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
// pallet_collective_Instance2: Some(TechnicalCommitteeConfig {
// members: endowed_accounts.iter().cloned().collect::<Vec<_>>()[..(num_endowed_accounts + 1) / 2].to_vec(),
// phantom: Default::default(),
// }),
pallet_contracts: Some(ContractsConfig {
current_schedule: pallet_contracts::Schedule {
enable_println, // this should only be enabled on development chains
..Default::default()
},
gas_price: 1 * MILLI,
}),
pallet_sudo: Some(SudoConfig { key: root_key }),
pallet_babe: Some(BabeConfig { authorities: vec![] }),
pallet_im_online: Some(ImOnlineConfig { keys: vec![] }),
pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }),
pallet_grandpa: Some(GrandpaConfig { authorities: vec![] }),
// pallet_membership_Instance1: Some(Default::default()),
// pallet_treasury: Some(Default::default()),
pallet_ring: Some(BalancesConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, RING_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_kton: Some(KtonConfig {
balances: endowed_accounts
.iter()
.cloned()
.map(|k| (k, KTON_ENDOWMENT))
.chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH)))
.collect(),
vesting: vec![],
}),
pallet_staking: Some(StakingConfig {
current_era: 0,
validator_count: initial_authorities.len() as u32 * 2,
minimum_validator_count: initial_authorities.len() as u32,
stakers: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator))
.collect(),
invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(),
slash_reward_fraction: Perbill::from_percent(10),
..Default::default()
}),
}
}
/// Staging testnet config.
pub fn staging_testnet_config() -> ChainSpec {
fn staging_testnet_config_genesis() -> GenesisConfig {
// stash, controller, session-key
// generated with secret:
// for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done
// and
// for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done
let initial_authorities: Vec<(
AccountId,
AccountId,
GrandpaId,
BabeId,
ImOnlineId,
AuthorityDiscoveryId,
)> = vec![
(
// 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy
hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(),
// 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq
hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(),
// 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC
hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
// 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8
hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(),
),
(
// 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2
hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(),
// 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF
hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(),
// 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE
hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
// 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ
hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(),
),
(
// 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp
hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(),
// 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9
hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(),
// 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d
hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
// 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(),
),
(
// 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9
hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(),
// 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn
hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(),
// 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4
hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
// 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x
hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(),
),
];
// generated with secret: subkey inspect "$secret"/fir
let root_key: AccountId = hex![
// 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo
"9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809"
]
.into();
let endowed_accounts: Vec<AccountId> = vec![root_key.clone()];
darwinia_genesis(initial_authorities, root_key, endowed_accounts, false, true)
}
let boot_nodes = vec![];
ChainSpec::from_genesis(
"Staging Testnet",
"staging_testnet",
staging_testnet_config_genesis,
boot_nodes,
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
None,
None,
Default::default(),
)
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec {
fn development_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![get_authority_keys_from_seed("Alice")],
get_account_id_from_seed::<sr25519::Public>("Alice"),
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Development",
"dev",
development_config_genesis,
vec![],
None,
None,
None,
Default::default(),
)
}
/// IceFrog local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec {
fn | () -> GenesisConfig {
darwinia_genesis(
vec![
get_authority_keys_from_seed("Alice"),
get_authority_keys_from_seed("Bob"),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(), // 5FpQFHfKd1xQ9HLZLQoG1JAQSCJoUEVBELnKsKNcuRLZejJR
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
true,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
/// IceFrog testnet config generator
pub fn gen_icefrog_testnet_config() -> ChainSpec {
fn icefrog_config_genesis() -> GenesisConfig {
darwinia_genesis(
vec![
(
hex!["be3fd892bf0e2b33dbfcf298c99a9f71e631a57af6c017dc5ac078c5d5b3494b"].into(), //stash
hex!["70bf51d123581d6e51af70b342cac75ae0a0fc71d1a8d388719139af9c042b18"].into(),
get_from_seed::<GrandpaId>("Alice"),
get_from_seed::<BabeId>("Alice"),
get_from_seed::<ImOnlineId>("Alice"),
get_from_seed::<AuthorityDiscoveryId>("Alice"),
),
(
hex!["e2f560c01a2d8e98d313d6799185c28a39e10896332b56304ff46392f585024c"].into(), //stash
hex!["94c51178449c09eec77918ea951fa3244f7b841eea1dd1489d2b5f2a53f8840f"].into(),
get_from_seed::<GrandpaId>("Bob"),
get_from_seed::<BabeId>("Bob"),
get_from_seed::<ImOnlineId>("Bob"),
get_from_seed::<AuthorityDiscoveryId>("Bob"),
),
],
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
vec![
hex!["a60837b2782f7ffd23e95cd26d1aa8d493b8badc6636234ccd44db03c41fcc6c"].into(),
hex!["f29311a581558ded67b8bfd097e614ce8135f777e29777d07ec501adb0ddab08"].into(),
hex!["1098e3bf7b351d6210c61b05edefb3a2b88c9611db26fbed2c7136b6d8f9c90f"].into(),
hex!["f252bc67e45acc9b3852a0ef84ddfce6c9cef25193617ef1421c460ecc2c746f"].into(),
hex!["90ce56f84328b180fc55146709aa7038c18efd58f1f247410be0b1ddc612df27"].into(),
hex!["4ca516c4b95488d0e6e9810a429a010b5716168d777c6b1399d3ed61cce1715c"].into(),
hex!["e28573bb4d9233c799defe8f85fa80a66b43d47f4c1aef64bb8fffde1ecf8606"].into(),
hex!["20e2455350cbe36631e82ce9b12152f98a3738cb763e46e65d1a253806a26d1a"].into(),
hex!["9eccaca8a35f0659aed4df45455a855bcb3e7bff7bfc9d672b676bbb78988f0d"].into(),
hex!["98dba2d3252825f4cd1141ca4f41ea201a22b4e129a6c7253cea546dbb20e442"].into(),
],
true,
false,
)
}
ChainSpec::from_genesis(
"Darwinia IceFrog Testnet",
"icefrog_testnet",
icefrog_config_genesis,
vec![],
Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])),
Some("DAR"),
{
let mut properties = Properties::new();
properties.insert("ss58Format".into(), 42.into());
properties.insert("tokenDecimals".into(), 9.into());
properties.insert("tokenSymbol".into(), "IRING".into());
properties.insert("ktonTokenDecimals".into(), 9.into());
properties.insert("ktonTokenSymbol".into(), "IKTON".into());
Some(properties)
},
Default::default(),
)
}
| icefrog_config_genesis | identifier_name |
mnist_benchmark.py | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MNIST benchmarks."""
import copy
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mnist'
BENCHMARK_CONFIG = """
mnist:
description: Runs MNIST Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GCP_ENV = 'PATH=/tmp/pkb/google-cloud-sdk/bin:$PATH'
flags.DEFINE_string('mnist_data_dir', None, 'mnist train file for tensorflow')
flags.DEFINE_string('imagenet_data_dir',
'gs://cloud-tpu-test-datasets/fake_imagenet',
'Directory where the input data is stored')
flags.DEFINE_string(
't2t_data_dir', None,
'Directory where the input data is stored for tensor2tensor')
flags.DEFINE_integer('imagenet_num_train_images', 1281167,
'Size of ImageNet training data set.')
flags.DEFINE_integer('imagenet_num_eval_images', 50000,
'Size of ImageNet validation data set.')
flags.DEFINE_integer('mnist_num_train_images', 55000,
'Size of MNIST training data set.')
flags.DEFINE_integer('mnist_num_eval_images', 5000,
'Size of MNIST validation data set.')
flags.DEFINE_integer('mnist_train_epochs', 37,
'Total number of training echos', lower_bound=1)
flags.DEFINE_integer(
'mnist_eval_epochs', 0,
'Total number of evaluation epochs. If `0`, evaluation '
'after training is skipped.')
flags.DEFINE_integer('tpu_iterations', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer('mnist_batch_size', 1024,
'Mini-batch size for the training. Note that this '
'is the global batch size and not the per-shard batch.')
flags.DEFINE_enum('tpu_precision', 'bfloat16', ['bfloat16', 'float32'],
'Precision to use')
EXAMPLES_PER_SECOND_PRECISION = 0.01
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.data_dir = FLAGS.mnist_data_dir
benchmark_spec.iterations = FLAGS.tpu_iterations
benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
benchmark_spec.batch_size = FLAGS.mnist_batch_size
benchmark_spec.num_train_images = FLAGS.mnist_num_train_images
benchmark_spec.num_eval_images = FLAGS.mnist_num_eval_images
benchmark_spec.num_examples_per_epoch = (
float(benchmark_spec.num_train_images) / benchmark_spec.batch_size)
benchmark_spec.train_epochs = FLAGS.mnist_train_epochs
benchmark_spec.train_steps = int(
benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.eval_epochs = FLAGS.mnist_eval_epochs
benchmark_spec.eval_steps = int(
benchmark_spec.eval_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.precision = FLAGS.tpu_precision
benchmark_spec.env_cmd = 'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models'
def Prepare(benchmark_spec):
"""Install and set up MNIST on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
benchmark_spec.always_call_cleanup = True
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if not benchmark_spec.tpus:
vm.Install('tensorflow')
vm.Install('cloud_tpu_models')
vm.Install('tensorflow_models')
if benchmark_spec.tpus:
storage_service = gcs.GoogleCloudStorageService()
benchmark_spec.storage_service = storage_service
bucket = 'pkb{}'.format(FLAGS.run_uri)
benchmark_spec.bucket = bucket
benchmark_spec.model_dir = 'gs://{}'.format(bucket)
location = benchmark_spec.tpu_groups['train'].GetZone()
storage_service.PrepareService(util.GetRegionFromZone(location))
storage_service.MakeBucket(bucket)
storage_service.AclBucket(benchmark_spec.gcp_service_account, gcs.WRITER,
bucket)
else:
benchmark_spec.model_dir = '/tmp'
def CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'data_dir': benchmark_spec.data_dir,
'model_dir': benchmark_spec.model_dir,
'train_steps': benchmark_spec.train_steps,
'eval_steps': benchmark_spec.eval_steps,
'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
'iterations': benchmark_spec.iterations,
'num_train_images': benchmark_spec.num_train_images,
'num_eval_images': benchmark_spec.num_eval_images,
'train_epochs': benchmark_spec.train_epochs,
'eval_epochs': benchmark_spec.eval_epochs,
'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
'train_batch_size': benchmark_spec.batch_size,
'eval_batch_size': benchmark_spec.batch_size
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
def ExtractThroughput(regex, output, metadata, metric, unit):
"""Extract throughput from MNIST output.
Args:
regex: string. Regular expression.
output: MNIST output
metadata: dict. Additional metadata to include with the sample.
metric: string. Name of the metric within the benchmark.
unit: string. Units for 'value'.
Returns:
samples containing the throughput
"""
matches = regex_util.ExtractAllMatches(regex, output)
samples = []
for index, value in enumerate(matches):
metadata_with_index = copy.deepcopy(metadata)
metadata_with_index['index'] = index
samples.append(sample.Sample(metric, float(value), unit,
metadata_with_index))
return samples
def MakeSamplesFromTrainOutput(metadata, output, elapsed_seconds, step):
"""Create a sample containing training metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
step: int, the global steps in the training process.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing training metrics, current step, elapsed seconds
"""
samples = []
metadata_copy = metadata.copy()
metadata_copy['step'] = int(step)
metadata_copy['epoch'] = step / metadata['num_examples_per_epoch']
metadata_copy['elapsed_seconds'] = elapsed_seconds
get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
loss = get_mean(regex_util.ExtractAllMatches(
r'Loss for final step: (\d+\.\d+)', output))
samples.append(sample.Sample('Loss', float(loss), '', metadata_copy))
if 'global_step/sec: ' in output:
|
return samples
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
"""Create a sample containing evaluation metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing evaluation metrics
"""
pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
r'global_step = (\d+), loss = (\d+\.\d+)')
accuracy, step, loss = regex_util.ExtractAllMatches(pattern, output).pop()
metadata_copy = metadata.copy()
step = int(step)
metadata_copy['step'] = step
num_examples_per_epoch = metadata['num_examples_per_epoch']
metadata_copy['epoch'] = step / num_examples_per_epoch
metadata_copy['elapsed_seconds'] = elapsed_seconds
return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
sample.Sample('Accuracy', float(accuracy) * 100, '%', metadata_copy)]
def Run(benchmark_spec):
"""Run MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if benchmark_spec.tpus:
mnist_benchmark_script = 'mnist_tpu.py'
mnist_benchmark_cmd = ('cd tpu/models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--iterations={iterations} '
'--model_dir={model_dir} '
'--batch_size={batch_size}'.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
iterations=benchmark_spec.iterations,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
else:
mnist_benchmark_script = 'mnist.py'
mnist_benchmark_cmd = ('cd models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--model_dir={model_dir} '
'--batch_size={batch_size} '.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
if nvidia_driver.CheckNvidiaGpuExists(vm):
mnist_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)
samples = []
metadata = CreateMetadataDict(benchmark_spec)
if benchmark_spec.train_steps > 0:
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['train'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['train'].GetNumShards())
else:
tpu = num_shards = ''
if benchmark_spec.tpus:
mnist_benchmark_train_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '
'{num_shards} --noenable_predict'.format(
cmd=mnist_benchmark_cmd,
tpu=tpu,
use_tpu=bool(benchmark_spec.tpus),
train_steps=benchmark_spec.train_steps,
num_shards=num_shards))
else:
mnist_benchmark_train_cmd = (
'{cmd} --train_epochs={train_epochs} '.format(
cmd=mnist_benchmark_cmd,
train_epochs=benchmark_spec.train_epochs))
start = time.time()
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)
elapsed_seconds = (time.time() - start)
samples.extend(MakeSamplesFromTrainOutput(
metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))
if benchmark_spec.eval_steps > 0:
if benchmark_spec.tpus:
mnist_benchmark_eval_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'
.format(
cmd=mnist_benchmark_cmd,
use_tpu=bool(benchmark_spec.tpus),
tpu=benchmark_spec.tpu_groups['eval'].GetName(),
eval_steps=benchmark_spec.eval_steps))
else:
mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(
cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)
samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,
elapsed_seconds))
return samples
def Cleanup(benchmark_spec):
"""Cleanup MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if benchmark_spec.tpus:
benchmark_spec.storage_service.DeleteBucket(benchmark_spec.bucket)
| global_step_sec = get_mean(regex_util.ExtractAllMatches(
r'global_step/sec: (\S+)', output))
samples.append(sample.Sample(
'Global Steps Per Second', global_step_sec,
'global_steps/sec', metadata_copy))
examples_sec = global_step_sec * metadata['train_batch_size']
if 'examples/sec: ' in output:
examples_sec_log = get_mean(regex_util.ExtractAllMatches(
r'examples/sec: (\S+)', output))
precision = abs(examples_sec_log - examples_sec) / examples_sec_log
assert precision < EXAMPLES_PER_SECOND_PRECISION, 'examples/sec is wrong.'
examples_sec = examples_sec_log
samples.append(sample.Sample('Examples Per Second', examples_sec,
'examples/sec', metadata_copy)) | conditional_block |
mnist_benchmark.py | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MNIST benchmarks."""
import copy
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mnist'
BENCHMARK_CONFIG = """
mnist:
description: Runs MNIST Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GCP_ENV = 'PATH=/tmp/pkb/google-cloud-sdk/bin:$PATH'
flags.DEFINE_string('mnist_data_dir', None, 'mnist train file for tensorflow')
flags.DEFINE_string('imagenet_data_dir',
'gs://cloud-tpu-test-datasets/fake_imagenet',
'Directory where the input data is stored')
flags.DEFINE_string(
't2t_data_dir', None,
'Directory where the input data is stored for tensor2tensor')
flags.DEFINE_integer('imagenet_num_train_images', 1281167,
'Size of ImageNet training data set.')
flags.DEFINE_integer('imagenet_num_eval_images', 50000,
'Size of ImageNet validation data set.')
flags.DEFINE_integer('mnist_num_train_images', 55000,
'Size of MNIST training data set.')
flags.DEFINE_integer('mnist_num_eval_images', 5000,
'Size of MNIST validation data set.')
flags.DEFINE_integer('mnist_train_epochs', 37,
'Total number of training echos', lower_bound=1)
flags.DEFINE_integer(
'mnist_eval_epochs', 0,
'Total number of evaluation epochs. If `0`, evaluation '
'after training is skipped.')
flags.DEFINE_integer('tpu_iterations', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer('mnist_batch_size', 1024,
'Mini-batch size for the training. Note that this '
'is the global batch size and not the per-shard batch.')
flags.DEFINE_enum('tpu_precision', 'bfloat16', ['bfloat16', 'float32'],
'Precision to use')
EXAMPLES_PER_SECOND_PRECISION = 0.01
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.data_dir = FLAGS.mnist_data_dir
benchmark_spec.iterations = FLAGS.tpu_iterations
benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
benchmark_spec.batch_size = FLAGS.mnist_batch_size
benchmark_spec.num_train_images = FLAGS.mnist_num_train_images
benchmark_spec.num_eval_images = FLAGS.mnist_num_eval_images
benchmark_spec.num_examples_per_epoch = (
float(benchmark_spec.num_train_images) / benchmark_spec.batch_size)
benchmark_spec.train_epochs = FLAGS.mnist_train_epochs
benchmark_spec.train_steps = int(
benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.eval_epochs = FLAGS.mnist_eval_epochs
benchmark_spec.eval_steps = int(
benchmark_spec.eval_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.precision = FLAGS.tpu_precision
benchmark_spec.env_cmd = 'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models'
def Prepare(benchmark_spec):
"""Install and set up MNIST on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
benchmark_spec.always_call_cleanup = True
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if not benchmark_spec.tpus:
vm.Install('tensorflow')
vm.Install('cloud_tpu_models')
vm.Install('tensorflow_models')
if benchmark_spec.tpus:
storage_service = gcs.GoogleCloudStorageService()
benchmark_spec.storage_service = storage_service
bucket = 'pkb{}'.format(FLAGS.run_uri)
benchmark_spec.bucket = bucket
benchmark_spec.model_dir = 'gs://{}'.format(bucket)
location = benchmark_spec.tpu_groups['train'].GetZone()
storage_service.PrepareService(util.GetRegionFromZone(location))
storage_service.MakeBucket(bucket)
storage_service.AclBucket(benchmark_spec.gcp_service_account, gcs.WRITER,
bucket)
else:
benchmark_spec.model_dir = '/tmp'
def CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'data_dir': benchmark_spec.data_dir,
'model_dir': benchmark_spec.model_dir,
'train_steps': benchmark_spec.train_steps,
'eval_steps': benchmark_spec.eval_steps,
'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
'iterations': benchmark_spec.iterations,
'num_train_images': benchmark_spec.num_train_images,
'num_eval_images': benchmark_spec.num_eval_images,
'train_epochs': benchmark_spec.train_epochs,
'eval_epochs': benchmark_spec.eval_epochs,
'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
'train_batch_size': benchmark_spec.batch_size,
'eval_batch_size': benchmark_spec.batch_size
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
def ExtractThroughput(regex, output, metadata, metric, unit):
"""Extract throughput from MNIST output.
Args:
regex: string. Regular expression.
output: MNIST output
metadata: dict. Additional metadata to include with the sample.
metric: string. Name of the metric within the benchmark.
unit: string. Units for 'value'.
Returns:
samples containing the throughput | metadata_with_index = copy.deepcopy(metadata)
metadata_with_index['index'] = index
samples.append(sample.Sample(metric, float(value), unit,
metadata_with_index))
return samples
def MakeSamplesFromTrainOutput(metadata, output, elapsed_seconds, step):
"""Create a sample containing training metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
step: int, the global steps in the training process.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing training metrics, current step, elapsed seconds
"""
samples = []
metadata_copy = metadata.copy()
metadata_copy['step'] = int(step)
metadata_copy['epoch'] = step / metadata['num_examples_per_epoch']
metadata_copy['elapsed_seconds'] = elapsed_seconds
get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
loss = get_mean(regex_util.ExtractAllMatches(
r'Loss for final step: (\d+\.\d+)', output))
samples.append(sample.Sample('Loss', float(loss), '', metadata_copy))
if 'global_step/sec: ' in output:
global_step_sec = get_mean(regex_util.ExtractAllMatches(
r'global_step/sec: (\S+)', output))
samples.append(sample.Sample(
'Global Steps Per Second', global_step_sec,
'global_steps/sec', metadata_copy))
examples_sec = global_step_sec * metadata['train_batch_size']
if 'examples/sec: ' in output:
examples_sec_log = get_mean(regex_util.ExtractAllMatches(
r'examples/sec: (\S+)', output))
precision = abs(examples_sec_log - examples_sec) / examples_sec_log
assert precision < EXAMPLES_PER_SECOND_PRECISION, 'examples/sec is wrong.'
examples_sec = examples_sec_log
samples.append(sample.Sample('Examples Per Second', examples_sec,
'examples/sec', metadata_copy))
return samples
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
"""Create a sample containing evaluation metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing evaluation metrics
"""
pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
r'global_step = (\d+), loss = (\d+\.\d+)')
accuracy, step, loss = regex_util.ExtractAllMatches(pattern, output).pop()
metadata_copy = metadata.copy()
step = int(step)
metadata_copy['step'] = step
num_examples_per_epoch = metadata['num_examples_per_epoch']
metadata_copy['epoch'] = step / num_examples_per_epoch
metadata_copy['elapsed_seconds'] = elapsed_seconds
return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
sample.Sample('Accuracy', float(accuracy) * 100, '%', metadata_copy)]
def Run(benchmark_spec):
"""Run MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if benchmark_spec.tpus:
mnist_benchmark_script = 'mnist_tpu.py'
mnist_benchmark_cmd = ('cd tpu/models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--iterations={iterations} '
'--model_dir={model_dir} '
'--batch_size={batch_size}'.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
iterations=benchmark_spec.iterations,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
else:
mnist_benchmark_script = 'mnist.py'
mnist_benchmark_cmd = ('cd models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--model_dir={model_dir} '
'--batch_size={batch_size} '.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
if nvidia_driver.CheckNvidiaGpuExists(vm):
mnist_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)
samples = []
metadata = CreateMetadataDict(benchmark_spec)
if benchmark_spec.train_steps > 0:
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['train'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['train'].GetNumShards())
else:
tpu = num_shards = ''
if benchmark_spec.tpus:
mnist_benchmark_train_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '
'{num_shards} --noenable_predict'.format(
cmd=mnist_benchmark_cmd,
tpu=tpu,
use_tpu=bool(benchmark_spec.tpus),
train_steps=benchmark_spec.train_steps,
num_shards=num_shards))
else:
mnist_benchmark_train_cmd = (
'{cmd} --train_epochs={train_epochs} '.format(
cmd=mnist_benchmark_cmd,
train_epochs=benchmark_spec.train_epochs))
start = time.time()
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)
elapsed_seconds = (time.time() - start)
samples.extend(MakeSamplesFromTrainOutput(
metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))
if benchmark_spec.eval_steps > 0:
if benchmark_spec.tpus:
mnist_benchmark_eval_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'
.format(
cmd=mnist_benchmark_cmd,
use_tpu=bool(benchmark_spec.tpus),
tpu=benchmark_spec.tpu_groups['eval'].GetName(),
eval_steps=benchmark_spec.eval_steps))
else:
mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(
cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)
samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,
elapsed_seconds))
return samples
def Cleanup(benchmark_spec):
"""Cleanup MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if benchmark_spec.tpus:
benchmark_spec.storage_service.DeleteBucket(benchmark_spec.bucket) | """
matches = regex_util.ExtractAllMatches(regex, output)
samples = []
for index, value in enumerate(matches): | random_line_split |
mnist_benchmark.py | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MNIST benchmarks."""
import copy
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mnist'
BENCHMARK_CONFIG = """
mnist:
description: Runs MNIST Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GCP_ENV = 'PATH=/tmp/pkb/google-cloud-sdk/bin:$PATH'
flags.DEFINE_string('mnist_data_dir', None, 'mnist train file for tensorflow')
flags.DEFINE_string('imagenet_data_dir',
'gs://cloud-tpu-test-datasets/fake_imagenet',
'Directory where the input data is stored')
flags.DEFINE_string(
't2t_data_dir', None,
'Directory where the input data is stored for tensor2tensor')
flags.DEFINE_integer('imagenet_num_train_images', 1281167,
'Size of ImageNet training data set.')
flags.DEFINE_integer('imagenet_num_eval_images', 50000,
'Size of ImageNet validation data set.')
flags.DEFINE_integer('mnist_num_train_images', 55000,
'Size of MNIST training data set.')
flags.DEFINE_integer('mnist_num_eval_images', 5000,
'Size of MNIST validation data set.')
flags.DEFINE_integer('mnist_train_epochs', 37,
'Total number of training echos', lower_bound=1)
flags.DEFINE_integer(
'mnist_eval_epochs', 0,
'Total number of evaluation epochs. If `0`, evaluation '
'after training is skipped.')
flags.DEFINE_integer('tpu_iterations', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer('mnist_batch_size', 1024,
'Mini-batch size for the training. Note that this '
'is the global batch size and not the per-shard batch.')
flags.DEFINE_enum('tpu_precision', 'bfloat16', ['bfloat16', 'float32'],
'Precision to use')
EXAMPLES_PER_SECOND_PRECISION = 0.01
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def | (benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.data_dir = FLAGS.mnist_data_dir
benchmark_spec.iterations = FLAGS.tpu_iterations
benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
benchmark_spec.batch_size = FLAGS.mnist_batch_size
benchmark_spec.num_train_images = FLAGS.mnist_num_train_images
benchmark_spec.num_eval_images = FLAGS.mnist_num_eval_images
benchmark_spec.num_examples_per_epoch = (
float(benchmark_spec.num_train_images) / benchmark_spec.batch_size)
benchmark_spec.train_epochs = FLAGS.mnist_train_epochs
benchmark_spec.train_steps = int(
benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.eval_epochs = FLAGS.mnist_eval_epochs
benchmark_spec.eval_steps = int(
benchmark_spec.eval_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.precision = FLAGS.tpu_precision
benchmark_spec.env_cmd = 'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models'
def Prepare(benchmark_spec):
"""Install and set up MNIST on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
benchmark_spec.always_call_cleanup = True
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if not benchmark_spec.tpus:
vm.Install('tensorflow')
vm.Install('cloud_tpu_models')
vm.Install('tensorflow_models')
if benchmark_spec.tpus:
storage_service = gcs.GoogleCloudStorageService()
benchmark_spec.storage_service = storage_service
bucket = 'pkb{}'.format(FLAGS.run_uri)
benchmark_spec.bucket = bucket
benchmark_spec.model_dir = 'gs://{}'.format(bucket)
location = benchmark_spec.tpu_groups['train'].GetZone()
storage_service.PrepareService(util.GetRegionFromZone(location))
storage_service.MakeBucket(bucket)
storage_service.AclBucket(benchmark_spec.gcp_service_account, gcs.WRITER,
bucket)
else:
benchmark_spec.model_dir = '/tmp'
def CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'data_dir': benchmark_spec.data_dir,
'model_dir': benchmark_spec.model_dir,
'train_steps': benchmark_spec.train_steps,
'eval_steps': benchmark_spec.eval_steps,
'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
'iterations': benchmark_spec.iterations,
'num_train_images': benchmark_spec.num_train_images,
'num_eval_images': benchmark_spec.num_eval_images,
'train_epochs': benchmark_spec.train_epochs,
'eval_epochs': benchmark_spec.eval_epochs,
'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
'train_batch_size': benchmark_spec.batch_size,
'eval_batch_size': benchmark_spec.batch_size
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
def ExtractThroughput(regex, output, metadata, metric, unit):
"""Extract throughput from MNIST output.
Args:
regex: string. Regular expression.
output: MNIST output
metadata: dict. Additional metadata to include with the sample.
metric: string. Name of the metric within the benchmark.
unit: string. Units for 'value'.
Returns:
samples containing the throughput
"""
matches = regex_util.ExtractAllMatches(regex, output)
samples = []
for index, value in enumerate(matches):
metadata_with_index = copy.deepcopy(metadata)
metadata_with_index['index'] = index
samples.append(sample.Sample(metric, float(value), unit,
metadata_with_index))
return samples
def MakeSamplesFromTrainOutput(metadata, output, elapsed_seconds, step):
"""Create a sample containing training metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
step: int, the global steps in the training process.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing training metrics, current step, elapsed seconds
"""
samples = []
metadata_copy = metadata.copy()
metadata_copy['step'] = int(step)
metadata_copy['epoch'] = step / metadata['num_examples_per_epoch']
metadata_copy['elapsed_seconds'] = elapsed_seconds
get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
loss = get_mean(regex_util.ExtractAllMatches(
r'Loss for final step: (\d+\.\d+)', output))
samples.append(sample.Sample('Loss', float(loss), '', metadata_copy))
if 'global_step/sec: ' in output:
global_step_sec = get_mean(regex_util.ExtractAllMatches(
r'global_step/sec: (\S+)', output))
samples.append(sample.Sample(
'Global Steps Per Second', global_step_sec,
'global_steps/sec', metadata_copy))
examples_sec = global_step_sec * metadata['train_batch_size']
if 'examples/sec: ' in output:
examples_sec_log = get_mean(regex_util.ExtractAllMatches(
r'examples/sec: (\S+)', output))
precision = abs(examples_sec_log - examples_sec) / examples_sec_log
assert precision < EXAMPLES_PER_SECOND_PRECISION, 'examples/sec is wrong.'
examples_sec = examples_sec_log
samples.append(sample.Sample('Examples Per Second', examples_sec,
'examples/sec', metadata_copy))
return samples
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
"""Create a sample containing evaluation metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing evaluation metrics
"""
pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
r'global_step = (\d+), loss = (\d+\.\d+)')
accuracy, step, loss = regex_util.ExtractAllMatches(pattern, output).pop()
metadata_copy = metadata.copy()
step = int(step)
metadata_copy['step'] = step
num_examples_per_epoch = metadata['num_examples_per_epoch']
metadata_copy['epoch'] = step / num_examples_per_epoch
metadata_copy['elapsed_seconds'] = elapsed_seconds
return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
sample.Sample('Accuracy', float(accuracy) * 100, '%', metadata_copy)]
def Run(benchmark_spec):
"""Run MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if benchmark_spec.tpus:
mnist_benchmark_script = 'mnist_tpu.py'
mnist_benchmark_cmd = ('cd tpu/models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--iterations={iterations} '
'--model_dir={model_dir} '
'--batch_size={batch_size}'.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
iterations=benchmark_spec.iterations,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
else:
mnist_benchmark_script = 'mnist.py'
mnist_benchmark_cmd = ('cd models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--model_dir={model_dir} '
'--batch_size={batch_size} '.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
if nvidia_driver.CheckNvidiaGpuExists(vm):
mnist_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)
samples = []
metadata = CreateMetadataDict(benchmark_spec)
if benchmark_spec.train_steps > 0:
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['train'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['train'].GetNumShards())
else:
tpu = num_shards = ''
if benchmark_spec.tpus:
mnist_benchmark_train_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '
'{num_shards} --noenable_predict'.format(
cmd=mnist_benchmark_cmd,
tpu=tpu,
use_tpu=bool(benchmark_spec.tpus),
train_steps=benchmark_spec.train_steps,
num_shards=num_shards))
else:
mnist_benchmark_train_cmd = (
'{cmd} --train_epochs={train_epochs} '.format(
cmd=mnist_benchmark_cmd,
train_epochs=benchmark_spec.train_epochs))
start = time.time()
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)
elapsed_seconds = (time.time() - start)
samples.extend(MakeSamplesFromTrainOutput(
metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))
if benchmark_spec.eval_steps > 0:
if benchmark_spec.tpus:
mnist_benchmark_eval_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'
.format(
cmd=mnist_benchmark_cmd,
use_tpu=bool(benchmark_spec.tpus),
tpu=benchmark_spec.tpu_groups['eval'].GetName(),
eval_steps=benchmark_spec.eval_steps))
else:
mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(
cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)
samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,
elapsed_seconds))
return samples
def Cleanup(benchmark_spec):
"""Cleanup MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if benchmark_spec.tpus:
benchmark_spec.storage_service.DeleteBucket(benchmark_spec.bucket)
| _UpdateBenchmarkSpecWithFlags | identifier_name |
mnist_benchmark.py | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MNIST benchmarks."""
import copy
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import nvidia_driver
from perfkitbenchmarker.linux_packages import tensorflow
from perfkitbenchmarker.providers.gcp import gcs
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'mnist'
BENCHMARK_CONFIG = """
mnist:
description: Runs MNIST Benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
GCP_ENV = 'PATH=/tmp/pkb/google-cloud-sdk/bin:$PATH'
flags.DEFINE_string('mnist_data_dir', None, 'mnist train file for tensorflow')
flags.DEFINE_string('imagenet_data_dir',
'gs://cloud-tpu-test-datasets/fake_imagenet',
'Directory where the input data is stored')
flags.DEFINE_string(
't2t_data_dir', None,
'Directory where the input data is stored for tensor2tensor')
flags.DEFINE_integer('imagenet_num_train_images', 1281167,
'Size of ImageNet training data set.')
flags.DEFINE_integer('imagenet_num_eval_images', 50000,
'Size of ImageNet validation data set.')
flags.DEFINE_integer('mnist_num_train_images', 55000,
'Size of MNIST training data set.')
flags.DEFINE_integer('mnist_num_eval_images', 5000,
'Size of MNIST validation data set.')
flags.DEFINE_integer('mnist_train_epochs', 37,
'Total number of training echos', lower_bound=1)
flags.DEFINE_integer(
'mnist_eval_epochs', 0,
'Total number of evaluation epochs. If `0`, evaluation '
'after training is skipped.')
flags.DEFINE_integer('tpu_iterations', 500,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer('mnist_batch_size', 1024,
'Mini-batch size for the training. Note that this '
'is the global batch size and not the per-shard batch.')
flags.DEFINE_enum('tpu_precision', 'bfloat16', ['bfloat16', 'float32'],
'Precision to use')
EXAMPLES_PER_SECOND_PRECISION = 0.01
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.data_dir = FLAGS.mnist_data_dir
benchmark_spec.iterations = FLAGS.tpu_iterations
benchmark_spec.gcp_service_account = FLAGS.gcp_service_account
benchmark_spec.batch_size = FLAGS.mnist_batch_size
benchmark_spec.num_train_images = FLAGS.mnist_num_train_images
benchmark_spec.num_eval_images = FLAGS.mnist_num_eval_images
benchmark_spec.num_examples_per_epoch = (
float(benchmark_spec.num_train_images) / benchmark_spec.batch_size)
benchmark_spec.train_epochs = FLAGS.mnist_train_epochs
benchmark_spec.train_steps = int(
benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.eval_epochs = FLAGS.mnist_eval_epochs
benchmark_spec.eval_steps = int(
benchmark_spec.eval_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.precision = FLAGS.tpu_precision
benchmark_spec.env_cmd = 'export PYTHONPATH=$PYTHONPATH:$PWD/tpu/models'
def Prepare(benchmark_spec):
"""Install and set up MNIST on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
benchmark_spec.always_call_cleanup = True
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if not benchmark_spec.tpus:
vm.Install('tensorflow')
vm.Install('cloud_tpu_models')
vm.Install('tensorflow_models')
if benchmark_spec.tpus:
storage_service = gcs.GoogleCloudStorageService()
benchmark_spec.storage_service = storage_service
bucket = 'pkb{}'.format(FLAGS.run_uri)
benchmark_spec.bucket = bucket
benchmark_spec.model_dir = 'gs://{}'.format(bucket)
location = benchmark_spec.tpu_groups['train'].GetZone()
storage_service.PrepareService(util.GetRegionFromZone(location))
storage_service.MakeBucket(bucket)
storage_service.AclBucket(benchmark_spec.gcp_service_account, gcs.WRITER,
bucket)
else:
benchmark_spec.model_dir = '/tmp'
def CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = {
'use_tpu': bool(benchmark_spec.tpus),
'data_dir': benchmark_spec.data_dir,
'model_dir': benchmark_spec.model_dir,
'train_steps': benchmark_spec.train_steps,
'eval_steps': benchmark_spec.eval_steps,
'commit': cloud_tpu_models.GetCommit(benchmark_spec.vms[0]),
'iterations': benchmark_spec.iterations,
'num_train_images': benchmark_spec.num_train_images,
'num_eval_images': benchmark_spec.num_eval_images,
'train_epochs': benchmark_spec.train_epochs,
'eval_epochs': benchmark_spec.eval_epochs,
'num_examples_per_epoch': benchmark_spec.num_examples_per_epoch,
'train_batch_size': benchmark_spec.batch_size,
'eval_batch_size': benchmark_spec.batch_size
}
if benchmark_spec.tpus:
metadata.update({
'train_tpu_num_shards':
benchmark_spec.tpu_groups['train'].GetNumShards(),
'train_tpu_accelerator_type':
benchmark_spec.tpu_groups['train'].GetAcceleratorType()
})
return metadata
def ExtractThroughput(regex, output, metadata, metric, unit):
"""Extract throughput from MNIST output.
Args:
regex: string. Regular expression.
output: MNIST output
metadata: dict. Additional metadata to include with the sample.
metric: string. Name of the metric within the benchmark.
unit: string. Units for 'value'.
Returns:
samples containing the throughput
"""
matches = regex_util.ExtractAllMatches(regex, output)
samples = []
for index, value in enumerate(matches):
metadata_with_index = copy.deepcopy(metadata)
metadata_with_index['index'] = index
samples.append(sample.Sample(metric, float(value), unit,
metadata_with_index))
return samples
def MakeSamplesFromTrainOutput(metadata, output, elapsed_seconds, step):
"""Create a sample containing training metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
step: int, the global steps in the training process.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing training metrics, current step, elapsed seconds
"""
samples = []
metadata_copy = metadata.copy()
metadata_copy['step'] = int(step)
metadata_copy['epoch'] = step / metadata['num_examples_per_epoch']
metadata_copy['elapsed_seconds'] = elapsed_seconds
get_mean = lambda matches: sum(float(x) for x in matches) / len(matches)
loss = get_mean(regex_util.ExtractAllMatches(
r'Loss for final step: (\d+\.\d+)', output))
samples.append(sample.Sample('Loss', float(loss), '', metadata_copy))
if 'global_step/sec: ' in output:
global_step_sec = get_mean(regex_util.ExtractAllMatches(
r'global_step/sec: (\S+)', output))
samples.append(sample.Sample(
'Global Steps Per Second', global_step_sec,
'global_steps/sec', metadata_copy))
examples_sec = global_step_sec * metadata['train_batch_size']
if 'examples/sec: ' in output:
examples_sec_log = get_mean(regex_util.ExtractAllMatches(
r'examples/sec: (\S+)', output))
precision = abs(examples_sec_log - examples_sec) / examples_sec_log
assert precision < EXAMPLES_PER_SECOND_PRECISION, 'examples/sec is wrong.'
examples_sec = examples_sec_log
samples.append(sample.Sample('Examples Per Second', examples_sec,
'examples/sec', metadata_copy))
return samples
def MakeSamplesFromEvalOutput(metadata, output, elapsed_seconds):
|
def Run(benchmark_spec):
"""Run MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
if benchmark_spec.tpus:
mnist_benchmark_script = 'mnist_tpu.py'
mnist_benchmark_cmd = ('cd tpu/models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--iterations={iterations} '
'--model_dir={model_dir} '
'--batch_size={batch_size}'.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
iterations=benchmark_spec.iterations,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
else:
mnist_benchmark_script = 'mnist.py'
mnist_benchmark_cmd = ('cd models && '
'export PYTHONPATH=$(pwd) && '
'cd official/mnist && '
'python {script} '
'--data_dir={data_dir} '
'--model_dir={model_dir} '
'--batch_size={batch_size} '.format(
script=mnist_benchmark_script,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
batch_size=benchmark_spec.batch_size))
if nvidia_driver.CheckNvidiaGpuExists(vm):
mnist_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)
samples = []
metadata = CreateMetadataDict(benchmark_spec)
if benchmark_spec.train_steps > 0:
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['train'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['train'].GetNumShards())
else:
tpu = num_shards = ''
if benchmark_spec.tpus:
mnist_benchmark_train_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '
'{num_shards} --noenable_predict'.format(
cmd=mnist_benchmark_cmd,
tpu=tpu,
use_tpu=bool(benchmark_spec.tpus),
train_steps=benchmark_spec.train_steps,
num_shards=num_shards))
else:
mnist_benchmark_train_cmd = (
'{cmd} --train_epochs={train_epochs} '.format(
cmd=mnist_benchmark_cmd,
train_epochs=benchmark_spec.train_epochs))
start = time.time()
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)
elapsed_seconds = (time.time() - start)
samples.extend(MakeSamplesFromTrainOutput(
metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))
if benchmark_spec.eval_steps > 0:
if benchmark_spec.tpus:
mnist_benchmark_eval_cmd = (
'{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'
.format(
cmd=mnist_benchmark_cmd,
use_tpu=bool(benchmark_spec.tpus),
tpu=benchmark_spec.tpu_groups['eval'].GetName(),
eval_steps=benchmark_spec.eval_steps))
else:
mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(
cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))
stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)
samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,
elapsed_seconds))
return samples
def Cleanup(benchmark_spec):
"""Cleanup MNIST on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if benchmark_spec.tpus:
benchmark_spec.storage_service.DeleteBucket(benchmark_spec.bucket)
| """Create a sample containing evaluation metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
elapsed_seconds: float, elapsed seconds from saved checkpoint.
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mnist_benchmark_test.py
Returns:
a Sample containing evaluation metrics
"""
pattern = (r'Saving dict for global step \d+: accuracy = (\d+\.\d+), '
r'global_step = (\d+), loss = (\d+\.\d+)')
accuracy, step, loss = regex_util.ExtractAllMatches(pattern, output).pop()
metadata_copy = metadata.copy()
step = int(step)
metadata_copy['step'] = step
num_examples_per_epoch = metadata['num_examples_per_epoch']
metadata_copy['epoch'] = step / num_examples_per_epoch
metadata_copy['elapsed_seconds'] = elapsed_seconds
return [sample.Sample('Eval Loss', float(loss), '', metadata_copy),
sample.Sample('Accuracy', float(accuracy) * 100, '%', metadata_copy)] | identifier_body |
ext.rs | //! Safe wrapper around externalities invokes.
use wasm_std::{
self,
types::{H256, U256, Address}
};
/// Generic wasm error
#[derive(Debug)]
pub struct Error;
mod external {
extern "C" {
// Various call variants
/// Direct/classic call.
/// Corresponds to "CALL" opcode in EVM
pub fn ccall(
gas: i64,
address: *const u8,
val_ptr: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Delegate call.
/// Corresponds to "CALLCODE" opcode in EVM
pub fn dcall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Static call.
/// Corresponds to "STACICCALL" opcode in EVM
pub fn scall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
// environmental blockchain functions (runtime might not provide all of these!)
pub fn blockhash(number: i64, dest: *mut u8);
pub fn balance(address: *const u8, dest: *mut u8);
pub fn coinbase(dest: *mut u8);
pub fn timestamp() -> i64;
pub fn blocknumber() -> i64;
pub fn difficulty(dest: *mut u8);
pub fn gaslimit(dest: *mut u8);
#[cfg(feature = "kip6")]
pub fn gasleft() -> i64;
pub fn sender(dest: *mut u8);
pub fn address(dest: *mut u8);
pub fn value(dest: *mut u8);
pub fn origin(dest: *mut u8);
pub fn elog(
topic_ptr: *const u8,
topic_count: u32,
data_ptr: *const u8,
data_len: u32
);
pub fn create(
endowment: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
#[cfg(feature = "kip4")]
pub fn create2(
endowment: *const u8,
salt: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
pub fn suicide(refund: *const u8) -> !;
pub fn ret(ptr: *const u8, len: u32) -> !;
pub fn input_length() -> u32;
pub fn fetch_input(dst: *mut u8);
}
}
/// Halt execution and register account for deletion.
///
/// Value of the current account will be tranfered to `refund` address.
pub fn suicide(refund: &Address) -> ! {
unsafe { external::suicide(refund.as_ptr()); }
}
/// Get balance of the given account.
///
/// If an account is not registered in the chain yet,
/// it is considered as an account with `balance = 0`.
pub fn balance(address: &Address) -> U256 {
unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) }
}
/// Create a new account with the given code
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::zero();
unsafe {
if external::create(
endowment_arr.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
#[cfg(feature = "kip4")]
/// Create a new account with the given code and salt, requires KIP-4.
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::new();
unsafe {
if external::create2(
endowment_arr.as_ptr(),
salt.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
/// Message-call into an account
///
/// # Arguments:
/// * `gas`- a gas limit for a call. A call execution will halt if call exceed this amount
/// * `address` - an address of contract to send a call
/// * `value` - a value in Wei to send with a call
/// * `input` - a data to send with a call
/// * `result` - a mutable reference to be filled with a result data
///
/// # Returns:
///
/// Call is succeed if it returns `Result::Ok(())`
/// If call returns `Result::Err(Error)` it means tha call was failed due to execution halting
pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
let mut value_arr = [0u8; 32];
value.to_big_endian(&mut value_arr);
unsafe {
if external::ccall(
gas as i64,
address.as_ptr(),
value_arr.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(), result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but with code at the given `address`
///
/// Effectively this function is like calling current account but with
/// different code (i.e. like `DELEGATECALL` EVM instruction).
///
/// [`call`]: fn.call.html
pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::dcall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but this call and any of it's subcalls are disallowed to modify any storage.
///
/// It will return an error in this case.
///
/// [`call`]: fn.call.html
pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::scall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Returns hash of the given block or H256::zero()
///
/// Only works for 256 most recent blocks excluding current
/// Returns H256::zero() in case of failure
pub fn block_hash(block_number: u64) -> H256 {
let mut res = H256::zero();
unsafe {
external::blockhash(block_number as i64, res.as_mut_ptr())
}
res
}
/// Get the current block’s beneficiary address (the current miner account address)
pub fn coinbase() -> Address {
| /// Get the block's timestamp
///
/// It can be viewed as an output of Unix's `time()` function at
/// current block's inception.
pub fn timestamp() -> u64 {
unsafe { external::timestamp() as u64 }
}
/// Get the block's number
///
/// This value represents number of ancestor blocks.
/// The genesis block has a number of zero.
pub fn block_number() -> u64 {
unsafe { external::blocknumber() as u64 }
}
/// Get the block's difficulty.
pub fn difficulty() -> U256 {
unsafe { fetch_u256(|x| external::difficulty(x) ) }
}
/// Get the block's gas limit.
pub fn gas_limit() -> U256 {
unsafe { fetch_u256(|x| external::gaslimit(x) ) }
}
#[cfg(feature = "kip6")]
/// Get amount of gas left.
pub fn gas_left() -> u64 {
unsafe { external::gasleft() as u64 }
}
/// Get caller address
///
/// This is the address of the account that is directly responsible for this execution.
/// Use [`origin`] to get an address of external account - an original initiator of a transaction
pub fn sender() -> Address {
unsafe { fetch_address(|x| external::sender(x) ) }
}
/// Get execution origination address
///
/// This is the sender of original transaction.
/// It could be only external account, not a contract
pub fn origin() -> Address {
unsafe { fetch_address(|x| external::origin(x) ) }
}
/// Get deposited value by the instruction/transaction responsible for this execution.
pub fn value() -> U256 {
unsafe { fetch_u256(|x| external::value(x) ) }
}
/// Get address of currently executing account
pub fn address() -> Address {
unsafe { fetch_address(|x| external::address(x) ) }
}
/// Creates log entry with given topics and data.
///
/// There could be only up to 4 topics.
///
/// # Panics
///
/// If `topics` contains more than 4 elements then this function will trap.
pub fn log(topics: &[H256], data: &[u8]) {
unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); }
}
/// Allocates and requests [`call`] arguments (input)
///
/// Input data comes either with external transaction or from [`call`] input value.
pub fn input() -> wasm_std::Vec<u8> {
let len = unsafe { external::input_length() };
match len {
0 => wasm_std::Vec::new(),
non_zero => {
let mut data = wasm_std::Vec::with_capacity(non_zero as usize);
unsafe {
data.set_len(non_zero as usize);
external::fetch_input(data.as_mut_ptr());
}
data
}
}
}
/// Sets a [`call`] return value
///
/// Pass return data to the runtime. Runtime SHOULD trap the execution.
///
pub fn ret(data: &[u8]) -> ! {
unsafe { external::ret(data.as_ptr(), data.len() as u32); }
}
unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) {
let mut res = Address::zero();
f(res.as_mut_ptr());
res
}
unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) {
let mut res = [0u8; 32];
f(res.as_mut_ptr());
U256::from_big_endian(&res)
}
| unsafe { fetch_address(|x| external::coinbase(x) ) }
}
| identifier_body |
ext.rs | //! Safe wrapper around externalities invokes.
use wasm_std::{
self,
types::{H256, U256, Address}
};
/// Generic wasm error
#[derive(Debug)]
pub struct Error;
mod external {
extern "C" {
// Various call variants
/// Direct/classic call.
/// Corresponds to "CALL" opcode in EVM
pub fn ccall(
gas: i64,
address: *const u8,
val_ptr: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Delegate call.
/// Corresponds to "CALLCODE" opcode in EVM
pub fn dcall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Static call.
/// Corresponds to "STACICCALL" opcode in EVM
pub fn scall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
// environmental blockchain functions (runtime might not provide all of these!)
pub fn blockhash(number: i64, dest: *mut u8);
pub fn balance(address: *const u8, dest: *mut u8);
pub fn coinbase(dest: *mut u8);
pub fn timestamp() -> i64;
pub fn blocknumber() -> i64;
pub fn difficulty(dest: *mut u8);
pub fn gaslimit(dest: *mut u8);
#[cfg(feature = "kip6")]
pub fn gasleft() -> i64;
pub fn sender(dest: *mut u8);
pub fn address(dest: *mut u8);
pub fn value(dest: *mut u8);
pub fn origin(dest: *mut u8);
pub fn elog(
topic_ptr: *const u8,
topic_count: u32,
data_ptr: *const u8,
data_len: u32
);
pub fn create(
endowment: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
#[cfg(feature = "kip4")]
pub fn create2(
endowment: *const u8,
salt: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
pub fn suicide(refund: *const u8) -> !;
pub fn ret(ptr: *const u8, len: u32) -> !;
pub fn input_length() -> u32;
pub fn fetch_input(dst: *mut u8);
}
}
/// Halt execution and register account for deletion.
///
/// Value of the current account will be tranfered to `refund` address.
pub fn suicide(refund: &Address) -> ! {
unsafe { external::suicide(refund.as_ptr()); }
}
/// Get balance of the given account.
///
/// If an account is not registered in the chain yet,
/// it is considered as an account with `balance = 0`.
pub fn balance(address: &Address) -> U256 {
unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) }
}
/// Create a new account with the given code
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::zero();
unsafe {
if external::create(
endowment_arr.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
#[cfg(feature = "kip4")]
/// Create a new account with the given code and salt, requires KIP-4.
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::new();
unsafe {
if external::create2(
endowment_arr.as_ptr(),
salt.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
/// Message-call into an account
///
/// # Arguments:
/// * `gas`- a gas limit for a call. A call execution will halt if call exceed this amount
/// * `address` - an address of contract to send a call
/// * `value` - a value in Wei to send with a call
/// * `input` - a data to send with a call
/// * `result` - a mutable reference to be filled with a result data
///
/// # Returns:
///
/// Call is succeed if it returns `Result::Ok(())`
/// If call returns `Result::Err(Error)` it means tha call was failed due to execution halting
pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
let mut value_arr = [0u8; 32];
value.to_big_endian(&mut value_arr);
unsafe {
if external::ccall(
gas as i64,
address.as_ptr(),
value_arr.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(), result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but with code at the given `address`
///
/// Effectively this function is like calling current account but with
/// different code (i.e. like `DELEGATECALL` EVM instruction).
///
/// [`call`]: fn.call.html
pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::dcall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but this call and any of it's subcalls are disallowed to modify any storage.
///
/// It will return an error in this case.
///
/// [`call`]: fn.call.html
pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::scall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 | else {
Err(Error)
}
}
}
/// Returns hash of the given block or H256::zero()
///
/// Only works for 256 most recent blocks excluding current
/// Returns H256::zero() in case of failure
pub fn block_hash(block_number: u64) -> H256 {
let mut res = H256::zero();
unsafe {
external::blockhash(block_number as i64, res.as_mut_ptr())
}
res
}
/// Get the current block’s beneficiary address (the current miner account address)
pub fn coinbase() -> Address {
unsafe { fetch_address(|x| external::coinbase(x) ) }
}
/// Get the block's timestamp
///
/// It can be viewed as an output of Unix's `time()` function at
/// current block's inception.
pub fn timestamp() -> u64 {
unsafe { external::timestamp() as u64 }
}
/// Get the block's number
///
/// This value represents number of ancestor blocks.
/// The genesis block has a number of zero.
pub fn block_number() -> u64 {
unsafe { external::blocknumber() as u64 }
}
/// Get the block's difficulty.
pub fn difficulty() -> U256 {
unsafe { fetch_u256(|x| external::difficulty(x) ) }
}
/// Get the block's gas limit.
pub fn gas_limit() -> U256 {
unsafe { fetch_u256(|x| external::gaslimit(x) ) }
}
#[cfg(feature = "kip6")]
/// Get amount of gas left.
pub fn gas_left() -> u64 {
unsafe { external::gasleft() as u64 }
}
/// Get caller address
///
/// This is the address of the account that is directly responsible for this execution.
/// Use [`origin`] to get an address of external account - an original initiator of a transaction
pub fn sender() -> Address {
unsafe { fetch_address(|x| external::sender(x) ) }
}
/// Get execution origination address
///
/// This is the sender of original transaction.
/// It could be only external account, not a contract
pub fn origin() -> Address {
unsafe { fetch_address(|x| external::origin(x) ) }
}
/// Get deposited value by the instruction/transaction responsible for this execution.
pub fn value() -> U256 {
unsafe { fetch_u256(|x| external::value(x) ) }
}
/// Get address of currently executing account
pub fn address() -> Address {
unsafe { fetch_address(|x| external::address(x) ) }
}
/// Creates log entry with given topics and data.
///
/// There could be only up to 4 topics.
///
/// # Panics
///
/// If `topics` contains more than 4 elements then this function will trap.
pub fn log(topics: &[H256], data: &[u8]) {
unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); }
}
/// Allocates and requests [`call`] arguments (input)
///
/// Input data comes either with external transaction or from [`call`] input value.
pub fn input() -> wasm_std::Vec<u8> {
let len = unsafe { external::input_length() };
match len {
0 => wasm_std::Vec::new(),
non_zero => {
let mut data = wasm_std::Vec::with_capacity(non_zero as usize);
unsafe {
data.set_len(non_zero as usize);
external::fetch_input(data.as_mut_ptr());
}
data
}
}
}
/// Sets a [`call`] return value
///
/// Pass return data to the runtime. Runtime SHOULD trap the execution.
///
pub fn ret(data: &[u8]) -> ! {
unsafe { external::ret(data.as_ptr(), data.len() as u32); }
}
unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) {
let mut res = Address::zero();
f(res.as_mut_ptr());
res
}
unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) {
let mut res = [0u8; 32];
f(res.as_mut_ptr());
U256::from_big_endian(&res)
}
| {
Ok(())
} | conditional_block |
ext.rs | //! Safe wrapper around externalities invokes.
use wasm_std::{
self,
types::{H256, U256, Address}
};
/// Generic wasm error
#[derive(Debug)]
pub struct Error;
mod external {
extern "C" {
// Various call variants
/// Direct/classic call.
/// Corresponds to "CALL" opcode in EVM
pub fn ccall(
gas: i64,
address: *const u8,
val_ptr: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Delegate call.
/// Corresponds to "CALLCODE" opcode in EVM
pub fn dcall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Static call.
/// Corresponds to "STACICCALL" opcode in EVM
pub fn scall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
// environmental blockchain functions (runtime might not provide all of these!)
pub fn blockhash(number: i64, dest: *mut u8);
pub fn balance(address: *const u8, dest: *mut u8);
pub fn coinbase(dest: *mut u8);
pub fn timestamp() -> i64;
pub fn blocknumber() -> i64;
pub fn difficulty(dest: *mut u8);
pub fn gaslimit(dest: *mut u8);
#[cfg(feature = "kip6")]
pub fn gasleft() -> i64;
pub fn sender(dest: *mut u8);
pub fn address(dest: *mut u8);
pub fn value(dest: *mut u8);
pub fn origin(dest: *mut u8);
pub fn elog(
topic_ptr: *const u8,
topic_count: u32,
data_ptr: *const u8,
data_len: u32
);
pub fn create(
endowment: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
#[cfg(feature = "kip4")]
pub fn create2(
endowment: *const u8,
salt: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
pub fn suicide(refund: *const u8) -> !;
pub fn ret(ptr: *const u8, len: u32) -> !;
pub fn input_length() -> u32;
pub fn fetch_input(dst: *mut u8);
}
}
/// Halt execution and register account for deletion.
///
/// Value of the current account will be tranfered to `refund` address.
pub fn suicide(refund: &Address) -> ! {
unsafe { external::suicide(refund.as_ptr()); }
}
/// Get balance of the given account.
///
/// If an account is not registered in the chain yet,
/// it is considered as an account with `balance = 0`.
pub fn balance(address: &Address) -> U256 {
unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) }
}
/// Create a new account with the given code
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::zero();
unsafe { | if external::create(
endowment_arr.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
#[cfg(feature = "kip4")]
/// Create a new account with the given code and salt, requires KIP-4.
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::new();
unsafe {
if external::create2(
endowment_arr.as_ptr(),
salt.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
/// Message-call into an account
///
/// # Arguments:
/// * `gas`- a gas limit for a call. A call execution will halt if call exceed this amount
/// * `address` - an address of contract to send a call
/// * `value` - a value in Wei to send with a call
/// * `input` - a data to send with a call
/// * `result` - a mutable reference to be filled with a result data
///
/// # Returns:
///
/// Call is succeed if it returns `Result::Ok(())`
/// If call returns `Result::Err(Error)` it means tha call was failed due to execution halting
pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
let mut value_arr = [0u8; 32];
value.to_big_endian(&mut value_arr);
unsafe {
if external::ccall(
gas as i64,
address.as_ptr(),
value_arr.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(), result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but with code at the given `address`
///
/// Effectively this function is like calling current account but with
/// different code (i.e. like `DELEGATECALL` EVM instruction).
///
/// [`call`]: fn.call.html
pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::dcall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but this call and any of it's subcalls are disallowed to modify any storage.
///
/// It will return an error in this case.
///
/// [`call`]: fn.call.html
pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::scall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Returns hash of the given block or H256::zero()
///
/// Only works for 256 most recent blocks excluding current
/// Returns H256::zero() in case of failure
pub fn block_hash(block_number: u64) -> H256 {
let mut res = H256::zero();
unsafe {
external::blockhash(block_number as i64, res.as_mut_ptr())
}
res
}
/// Get the current block’s beneficiary address (the current miner account address)
pub fn coinbase() -> Address {
unsafe { fetch_address(|x| external::coinbase(x) ) }
}
/// Get the block's timestamp
///
/// It can be viewed as an output of Unix's `time()` function at
/// current block's inception.
pub fn timestamp() -> u64 {
unsafe { external::timestamp() as u64 }
}
/// Get the block's number
///
/// This value represents number of ancestor blocks.
/// The genesis block has a number of zero.
pub fn block_number() -> u64 {
unsafe { external::blocknumber() as u64 }
}
/// Get the block's difficulty.
pub fn difficulty() -> U256 {
unsafe { fetch_u256(|x| external::difficulty(x) ) }
}
/// Get the block's gas limit.
pub fn gas_limit() -> U256 {
unsafe { fetch_u256(|x| external::gaslimit(x) ) }
}
#[cfg(feature = "kip6")]
/// Get amount of gas left.
pub fn gas_left() -> u64 {
unsafe { external::gasleft() as u64 }
}
/// Get caller address
///
/// This is the address of the account that is directly responsible for this execution.
/// Use [`origin`] to get an address of external account - an original initiator of a transaction
pub fn sender() -> Address {
unsafe { fetch_address(|x| external::sender(x) ) }
}
/// Get execution origination address
///
/// This is the sender of original transaction.
/// It could be only external account, not a contract
pub fn origin() -> Address {
unsafe { fetch_address(|x| external::origin(x) ) }
}
/// Get deposited value by the instruction/transaction responsible for this execution.
pub fn value() -> U256 {
unsafe { fetch_u256(|x| external::value(x) ) }
}
/// Get address of currently executing account
pub fn address() -> Address {
unsafe { fetch_address(|x| external::address(x) ) }
}
/// Creates log entry with given topics and data.
///
/// There could be only up to 4 topics.
///
/// # Panics
///
/// If `topics` contains more than 4 elements then this function will trap.
pub fn log(topics: &[H256], data: &[u8]) {
unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); }
}
/// Allocates and requests [`call`] arguments (input)
///
/// Input data comes either with external transaction or from [`call`] input value.
pub fn input() -> wasm_std::Vec<u8> {
let len = unsafe { external::input_length() };
match len {
0 => wasm_std::Vec::new(),
non_zero => {
let mut data = wasm_std::Vec::with_capacity(non_zero as usize);
unsafe {
data.set_len(non_zero as usize);
external::fetch_input(data.as_mut_ptr());
}
data
}
}
}
/// Sets a [`call`] return value
///
/// Pass return data to the runtime. Runtime SHOULD trap the execution.
///
pub fn ret(data: &[u8]) -> ! {
unsafe { external::ret(data.as_ptr(), data.len() as u32); }
}
unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) {
let mut res = Address::zero();
f(res.as_mut_ptr());
res
}
unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) {
let mut res = [0u8; 32];
f(res.as_mut_ptr());
U256::from_big_endian(&res)
} | random_line_split | |
ext.rs | //! Safe wrapper around externalities invokes.
use wasm_std::{
self,
types::{H256, U256, Address}
};
/// Generic wasm error
#[derive(Debug)]
pub struct Error;
mod external {
extern "C" {
// Various call variants
/// Direct/classic call.
/// Corresponds to "CALL" opcode in EVM
pub fn ccall(
gas: i64,
address: *const u8,
val_ptr: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Delegate call.
/// Corresponds to "CALLCODE" opcode in EVM
pub fn dcall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
/// Static call.
/// Corresponds to "STACICCALL" opcode in EVM
pub fn scall(
gas: i64,
address: *const u8,
input_ptr: *const u8,
input_len: u32,
result_ptr: *mut u8,
result_len: u32,
) -> i32;
// environmental blockchain functions (runtime might not provide all of these!)
pub fn blockhash(number: i64, dest: *mut u8);
pub fn balance(address: *const u8, dest: *mut u8);
pub fn coinbase(dest: *mut u8);
pub fn timestamp() -> i64;
pub fn blocknumber() -> i64;
pub fn difficulty(dest: *mut u8);
pub fn gaslimit(dest: *mut u8);
#[cfg(feature = "kip6")]
pub fn gasleft() -> i64;
pub fn sender(dest: *mut u8);
pub fn address(dest: *mut u8);
pub fn value(dest: *mut u8);
pub fn origin(dest: *mut u8);
pub fn elog(
topic_ptr: *const u8,
topic_count: u32,
data_ptr: *const u8,
data_len: u32
);
pub fn create(
endowment: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
#[cfg(feature = "kip4")]
pub fn create2(
endowment: *const u8,
salt: *const u8,
code_ptr: *const u8,
code_len: u32,
result_ptr: *mut u8
) -> i32;
pub fn suicide(refund: *const u8) -> !;
pub fn ret(ptr: *const u8, len: u32) -> !;
pub fn input_length() -> u32;
pub fn fetch_input(dst: *mut u8);
}
}
/// Halt execution and register account for deletion.
///
/// Value of the current account will be tranfered to `refund` address.
pub fn suicide(refund: &Address) -> ! {
unsafe { external::suicide(refund.as_ptr()); }
}
/// Get balance of the given account.
///
/// If an account is not registered in the chain yet,
/// it is considered as an account with `balance = 0`.
pub fn balance(address: &Address) -> U256 {
unsafe { fetch_u256(|x| external::balance(address.as_ptr(), x) ) }
}
/// Create a new account with the given code
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create(endowment: U256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::zero();
unsafe {
if external::create(
endowment_arr.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
#[cfg(feature = "kip4")]
/// Create a new account with the given code and salt, requires KIP-4.
///
/// # Errors
///
/// Returns [`Error`] in case contract constructor failed.
///
/// [`Error`]: struct.Error.html
pub fn create2(endowment: U256, salt: H256, code: &[u8]) -> Result<Address, Error> {
let mut endowment_arr = [0u8; 32];
endowment.to_big_endian(&mut endowment_arr);
let mut result = Address::new();
unsafe {
if external::create2(
endowment_arr.as_ptr(),
salt.as_ptr(),
code.as_ptr(),
code.len() as u32,
(&mut result).as_mut_ptr()
) == 0 {
Ok(result)
} else {
Err(Error)
}
}
}
/// Message-call into an account
///
/// # Arguments:
/// * `gas`- a gas limit for a call. A call execution will halt if call exceed this amount
/// * `address` - an address of contract to send a call
/// * `value` - a value in Wei to send with a call
/// * `input` - a data to send with a call
/// * `result` - a mutable reference to be filled with a result data
///
/// # Returns:
///
/// Call is succeed if it returns `Result::Ok(())`
/// If call returns `Result::Err(Error)` it means tha call was failed due to execution halting
pub fn call(gas: u64, address: &Address, value: U256, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
let mut value_arr = [0u8; 32];
value.to_big_endian(&mut value_arr);
unsafe {
if external::ccall(
gas as i64,
address.as_ptr(),
value_arr.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(), result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but with code at the given `address`
///
/// Effectively this function is like calling current account but with
/// different code (i.e. like `DELEGATECALL` EVM instruction).
///
/// [`call`]: fn.call.html
pub fn call_code(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::dcall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Like [`call`], but this call and any of it's subcalls are disallowed to modify any storage.
///
/// It will return an error in this case.
///
/// [`call`]: fn.call.html
pub fn static_call(gas: u64, address: &Address, input: &[u8], result: &mut [u8]) -> Result<(), Error> {
unsafe {
if external::scall(
gas as i64,
address.as_ptr(),
input.as_ptr(),
input.len() as u32,
result.as_mut_ptr(),
result.len() as u32
) == 0 {
Ok(())
} else {
Err(Error)
}
}
}
/// Returns hash of the given block or H256::zero()
///
/// Only works for 256 most recent blocks excluding current
/// Returns H256::zero() in case of failure
pub fn block_hash(block_number: u64) -> H256 {
let mut res = H256::zero();
unsafe {
external::blockhash(block_number as i64, res.as_mut_ptr())
}
res
}
/// Get the current block’s beneficiary address (the current miner account address)
pub fn coinbase() -> Address {
unsafe { fetch_address(|x| external::coinbase(x) ) }
}
/// Get the block's timestamp
///
/// It can be viewed as an output of Unix's `time()` function at
/// current block's inception.
pub fn timestamp() -> u64 {
unsafe { external::timestamp() as u64 }
}
/// Get the block's number
///
/// This value represents number of ancestor blocks.
/// The genesis block has a number of zero.
pub fn bl | -> u64 {
unsafe { external::blocknumber() as u64 }
}
/// Get the block's difficulty.
pub fn difficulty() -> U256 {
unsafe { fetch_u256(|x| external::difficulty(x) ) }
}
/// Get the block's gas limit.
pub fn gas_limit() -> U256 {
unsafe { fetch_u256(|x| external::gaslimit(x) ) }
}
#[cfg(feature = "kip6")]
/// Get amount of gas left.
pub fn gas_left() -> u64 {
unsafe { external::gasleft() as u64 }
}
/// Get caller address
///
/// This is the address of the account that is directly responsible for this execution.
/// Use [`origin`] to get an address of external account - an original initiator of a transaction
pub fn sender() -> Address {
unsafe { fetch_address(|x| external::sender(x) ) }
}
/// Get execution origination address
///
/// This is the sender of original transaction.
/// It could be only external account, not a contract
pub fn origin() -> Address {
unsafe { fetch_address(|x| external::origin(x) ) }
}
/// Get deposited value by the instruction/transaction responsible for this execution.
pub fn value() -> U256 {
unsafe { fetch_u256(|x| external::value(x) ) }
}
/// Get address of currently executing account
pub fn address() -> Address {
unsafe { fetch_address(|x| external::address(x) ) }
}
/// Creates log entry with given topics and data.
///
/// There could be only up to 4 topics.
///
/// # Panics
///
/// If `topics` contains more than 4 elements then this function will trap.
pub fn log(topics: &[H256], data: &[u8]) {
unsafe { external::elog(topics.as_ptr() as *const u8, topics.len() as u32, data.as_ptr(), data.len() as u32); }
}
/// Allocates and requests [`call`] arguments (input)
///
/// Input data comes either with external transaction or from [`call`] input value.
pub fn input() -> wasm_std::Vec<u8> {
let len = unsafe { external::input_length() };
match len {
0 => wasm_std::Vec::new(),
non_zero => {
let mut data = wasm_std::Vec::with_capacity(non_zero as usize);
unsafe {
data.set_len(non_zero as usize);
external::fetch_input(data.as_mut_ptr());
}
data
}
}
}
/// Sets a [`call`] return value
///
/// Pass return data to the runtime. Runtime SHOULD trap the execution.
///
pub fn ret(data: &[u8]) -> ! {
unsafe { external::ret(data.as_ptr(), data.len() as u32); }
}
unsafe fn fetch_address<F>(f: F) -> Address where F: Fn(*mut u8) {
let mut res = Address::zero();
f(res.as_mut_ptr());
res
}
unsafe fn fetch_u256<F>(f: F) -> U256 where F: Fn(*mut u8) {
let mut res = [0u8; 32];
f(res.as_mut_ptr());
U256::from_big_endian(&res)
}
| ock_number() | identifier_name |
networking.py | from __future__ import division
import struct
from cStringIO import StringIO
#import pyraknet
#from pyraknet import PacketTypes, PacketReliability, PacketPriority
import socket, traceback, os, sys, threading
from Queue import Queue
from unit import Unit
from buildings.igloo import Igloo
from player import Player
import data, snowball
# This will be assigned to the pyraknet Peer instance.
net = None
def send(message, to="*", exclude=()):
"""
This is mostly a shortcut to send a message over the network.
It will fail silently if there is no network connection.
"""
if net:
if isinstance(net, Server):
net.send(message, to, exclude)
else:
net.send(message)
unit_states = ["idle", "walking", "frozen", "throwing", "gathering", "entering",
"inbuilding", "unloading", "getting_snowball", "stray"]
holding_types = [None, "fish", "crystal"]
class Connection(object):
def __init__(self, sock, recv_queue=None):
self.sock = sock
self.send_queue = Queue() # Strings
if recv_queue:
self.recv_queue = recv_queue
else:
self.recv_queue = Queue() # Message objects
# recv_queue might be reassigned in another thread. (e.g., the server
# might want all connections so share a single queue after they have
# been authenticated. Thus, this lock should be held before accessing
# or reassigning the queue. (Unless if it is being accessed in the
# only thread it is ever reassigned in.)
self.recv_queue_lock = threading.Lock()
self.send_thread = threading.Thread(target=self.send_loop)
self.send_thread.setDaemon(1)
self.recv_thread = threading.Thread(target=self.receive_loop)
self.recv_thread.setDaemon(1)
self.send_thread.start()
self.recv_thread.start()
def replace_recv_queue(self, new_queue):
"""
This will assign a new queue for receiving messages for this connection.
All required locking will be done, and any messages left in the old
queue will be added to the new one.
"""
self.recv_queue_lock.acquire()
while not self.recv_queue.empty():
new_queue.put(self.recv_queue.get(True), True)
self.recv_queue = new_queue
self.recv_queue_lock.release()
def send_loop(self):
try:
#bytes = 0
#last_print = 0
while True:
d = self.send_queue.get(True)
self.sock.sendall(d)
#bytes += len(d)
#if data.get_ticks() > last_print + 1000:
#last_print = data.get_ticks()
#print "sent kB:", bytes/1000.
#bytes = 0
except:
print "****** Exception while sending: ****************"
traceback.print_exc()
self.sock.close()
def receive_loop(self):
try:
while True:
type_id = struct.unpack("!B", self.read(1))[0]
cls = indexed_message_classes[type_id]
m = cls.from_stream(self)
m.connection = self
self.recv_queue_lock.acquire()
self.recv_queue.put(m, True)
self.recv_queue_lock.release()
#print "*** Received ****", m
except:
print "****** Exception while receiving: **************"
traceback.print_exc()
self.sock.close()
def send(self, message):
"""
Queues a message up for sending.
Takes a single argument, ``message``, which can either be a ``Message``
object or a string.
"""
if isinstance(message, basestring):
self.send_queue.put(message)
else:
self.send_queue.put(struct.pack("!B", message.type_id) +
message.pack())
def read(self, count):
"""
Reads exactly ``count`` bytes from the socket.
"""
d = self.sock.recv(count, socket.MSG_WAITALL)
assert len(d) == count
return d
class Server(object):
def __init__(self, port):
self.connections = set()
self.recv_queue = Queue()
self.port = port
def start(self):
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.setDaemon(1)
self.listen_thread.start()
def listen(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", self.port))
s.listen(1)
while True:
try:
clientsock, clientaddr = s.accept()
except:
traceback.print_exc()
continue
self.new_connection(Connection(clientsock))
def new_connection(self, connection):
self.connections.add(connection)
connection.replace_recv_queue(self.recv_queue)
def send(self, message, to="*", exclude=()):
if isinstance(message, Message):
message = struct.pack("!B", message.type_id) + message.pack()
if to == "*":
to = set(self.connections)
for connection in to:
if connection in exclude or not connection: continue
connection.send(message)
def send_chat_message(msg, from_player, to_player=None):
if to_player:
to_id = to_player.player_id
else:
to_id = 0xffff
m = MChatMessage(msg, from_player.player_id, to_id)
if data.THIS_IS_SERVER:
if to_player:
send(m, to=[to_player.connection])
else:
send(m, exclude=[from_player.connection])
else:
net.send(m)
class Writer(object):
def __init__(self, data=None):
if data is None:
data = StringIO()
self.data = data
def single(self, format, *values):
if format[0] not in "@=<>!":
format = "!"+format
self.data.write(struct.pack(format, *values))
def multi(self, format, values):
self.single("!H", len(values))
if len(format.strip("@=<>!")) > 1:
for v in values:
self.single(format, *v)
else:
for v in values:
self.single(format, v)
def string(self, s):
self.multi("s", s)
class Reader(object):
def __init__(self, data):
if not hasattr(data, "read"):
data = StringIO(data)
self.data = data
def single(self, format):
if format[0] not in "@=<>!":
format = "!"+format
return struct.unpack(format, self.data.read(struct.calcsize(format)))
def multi(self, format, limit=1000):
length = self.single("!H")[0]
if length > limit:
raise RuntimeError("multi length (%d) is above limit (%d)!!" %
(length, limit))
if len(format.strip("@=<>!")) > 1:
for i in range(length):
yield self.single(format)
else:
for i in range(length):
yield self.single(format)[0]
def string(self):
return "".join(self.multi("s"))
class Message(object):
type_id = None
struct_format = None
attrs = None
def __init__(self, *pargs, **kwargs):
attrs = list(self.attrs)
for arg in pargs:
n = attrs.pop(0)
setattr(self, n, arg)
for n in attrs:
setattr(self, n, kwargs.pop(n))
if kwargs:
raise TypeError("unexpected keyword argument '%s'" %
kwargs.keys()[0])
def values(self):
l = []
for a in self.attrs:
l.append(getattr(self, a))
return l
@classmethod
def from_stream(cls, stream):
res = struct.unpack(cls.struct_format,
stream.read(struct.calcsize(cls.struct_format)))
return cls(*res)
def pack(self):
return struct.pack(self.struct_format, *[getattr(self, n) for n in
self.attrs])
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__,
dict([(a, getattr(self, a)) for a in self.attrs]))
# Unit stuff
class MUnitPosition(Message):
type_id = 100
struct_format = "!HhhbbBB"
attrs = ('unit_id', 'x', 'y', 'dir_x', 'dir_y', 'state_id', 'holding_id')
# 101 is free
class MUnitAddEmblem(Message):
type_id = 102
attrs = ("unit_id", "image_name", "animate", "offset")
def pack(self):
w = Writer()
w.single("H", self.unit_id)
w.string(self.image_name)
w.single("B", self.animate)
w.single("hh", *self.offset)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), bool(r.single("B")),
r.single("hh"))
class MUnitRemoveEmblem(Message):
type_id = 103
struct_format = "!H"
attrs = ("unit_id",)
# TODO support removing only one emblem, instead of all.
class MWarmth(Message):
type_id = 104
struct_format = "!HB"
attrs = ("unit_id", "warmth")
class MSnowball(Message):
type_id = 105
struct_format = "!HIIIIHHBHH"
attrs = ("unit_id", "start_x", "start_y", "end_x", "end_y", "end_tile_x",
"end_tile_y", "snowballs_left", 'px', 'py')
class MUnitIglooChange(Message):
type_id = 109
struct_format = "!HH"
attrs = ("unit_id", "building_id")
# Building stuff
class MBuildingOwner(Message):
type_id = 110
struct_format = "!HH"
attrs = ('building_id', 'player_id')
class MBuildingJeopardy(Message):
type_id = 111
struct_format = "!HBH"
attrs = ('building_id', 'jeopardy', 'player_taking_over_id')
class MBuildingStorage(Message):
type_id = 112
struct_format = "!Hhh"
attrs = ("building_id", 'fish', 'crystal')
class MCreateDynamicMapObject(Message):
type_id = 114
attrs = ("dmo_id", "image_name", "position", "obstruction", "hidden",
"minimapimage")
def | (self):
w = Writer()
w.single("H", self.dmo_id)
w.string(self.image_name)
w.single("HH", *self.position)
w.single("B", (self.hidden << 0) + (self.obstruction << 1))
w.string(self.minimapimage)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
dmo_id = r.single("H")[0]
image_name = r.string()
pos = r.single("HH")
v = r.single("B")[0] # Two bools are in one byte.
o = bool(v & 2) # second bit
h = bool(v & 1) # first bit
mini = r.string()
return cls(dmo_id, image_name, pos, o, h, mini)
class MDMOHidden(Message):
type_id = 115
struct_format = "!HB"
attrs = ("dmo_id", "hidden")
class MDMOPosition(Message):
type_id = 116
# TODO
class MResourceQuantity(Message):
type_id = 117
struct_format = "!hhh"
attrs = ("tx", "ty", "q")
# Player/connection stuff
class MNewPlayer(Message):
type_id = 120
attrs = ("player_id", "name", "color", "loading")
def pack(self):
w = Writer()
w.single("H", self.player_id)
w.string(self.name)
w.single("BBB", *self.color)
w.single("B", self.loading)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), r.single("BBB"),
bool(r.single("B")[0]))
class MWhoYouAre(Message):
type_id = 121
struct_format = "!H"
attrs = ("player_id",)
class MSetJob(Message):
type_id = 122
attrs = ("pos", "run_mode", "unit_ids")
def pack(self):
w = Writer()
w.single("HH", *self.pos)
w.single("B", self.run_mode)
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
pos = r.single("HH")
run_mode = bool(r.single("B")[0])
unit_ids = list(r.multi("H"))
return cls(pos, run_mode, unit_ids)
class MDisbanUnits(Message):
type_id = 123
attrs = ("unit_ids",)
def pack(self):
w = Writer()
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(list(r.multi("H")))
class MClientFinishedLoading(Message):
type_id = 124
struct_format = "!H"
attrs = ("player_id",)
class MGameStart(Message):
type_id = 224
struct_format = ""
attrs = ()
class MChatMessage(Message):
type_id = 225
attrs = ("msg", "from_id", "to_id")
def pack(self):
w = Writer()
w.string(self.msg)
# to_id will be 0xffff to specify everyone.
w.single("HH", self.from_id, self.to_id)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string(), *r.single("HH"))
class MPlayerVictory(Message):
type_id = 226
attrs = ("player_id", "victory")
def pack(self):
return struct.pack("!Hs", self.player_id,
{True:'t', False:'f', None:'n'}[self.victory])
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0],
dict(t=True, f=False, n=None)[r.single("s")[0]])
class MMapname(Message):
type_id = 227
attrs = ("mapname",)
def pack(self):
w = Writer()
w.string(self.mapname)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string())
message_classes = [MUnitPosition, MBuildingOwner, MBuildingJeopardy,
MBuildingStorage, MSnowball, MWarmth, MNewPlayer, MWhoYouAre, MSetJob,
MDisbanUnits, MCreateDynamicMapObject, MDMOHidden, MDMOPosition,
MResourceQuantity, MUnitAddEmblem, MUnitRemoveEmblem, MGameStart,
MChatMessage, MUnitIglooChange, MClientFinishedLoading, MPlayerVictory,
MMapname]
indexed_message_classes = {}
for cls in message_classes:
indexed_message_classes[cls.type_id] = cls
class NetLoopBase(object):
def handle_network(self):
while not self.net.recv_queue.empty():
message = self.net.recv_queue.get()
if not self.handle_message(message):
print "Unhandled message: ", message
def handle_message(self, message):
attr = "handle_" + message.__class__.__name__
if hasattr(self, attr):
getattr(self, attr)(message)
return True
else:
return False
class MasterUnit(Unit):
def __init__(self, *pargs, **kwargs):
self.observers = []
Unit.__init__(self, *pargs, **kwargs)
self._last_state_sent = self.state
def _set_warmth(self, w):
self._warmth = w
if hasattr(self, "unit_id"):
send(MWarmth(self.unit_id, w))
warmth = property(lambda self:self._warmth, _set_warmth)
#def _set_max_warmth(self, mw):
#self._max_warmth = mw
#self.call_remote("update_max_warmth", mw)
#max_warmth = property(lambda self:self._max_warmth, _set_max_warmth)
def _set_igloo(self, i):
self._igloo = i
if hasattr(self, "unit_id") and hasattr(i, "building_id"):
send(MUnitIglooChange(self.unit_id, i.building_id))
def move(self):
pos = self.x, self.y
Unit.move(self)
if pos != (self.x, self.y) or (self.state != self._last_state_sent and
self.state not in ['throwing', 'getting_snowball']):
send(MUnitPosition(self.unit_id, self.x, self.y,
self.direction[0], self.direction[1],
unit_states.index(self.state),
holding_types.index(self.holding)))
self._last_state_sent = self.state
def throw_snowball(self, target):
s = Unit.throw_snowball(self, target)
send(MSnowball(self.unit_id, s.start_x, s.start_y, s.end_x, s.end_y,
s.end_tile.x, s.end_tile.y,
self.snowballs, self.x, self.y))
self._last_state_sent = "throwing"
return s
def add_emblem(self, emblem):
m = MUnitAddEmblem(self.unit_id, emblem.image_name, emblem.animate,
emblem.offset)
send(m)
Unit.add_emblem(self, emblem)
def remove_emblem(self, emblem):
if emblem in self._emblems:
send(MUnitRemoveEmblem(self.unit_id))
Unit.remove_emblem(self, emblem)
class SlaveUnit(Unit):
def __init__(self, *pargs, **kwargs):
Unit.__init__(self, *pargs, **kwargs)
# State of the unit will be set later.
self._next_position = None
self._next_snowball = None
def handle_MUnitPosition(self, m):
state = unit_states[m.state_id]
holding = holding_types[m.holding_id]
self._next_position = ((m.x, m.y), (m.dir_x, m.dir_y), state, holding)
def handle_MSnowball(self, m):
self.snowballs = m.snowballs_left
#self.current_frame = 1
dir = self.direction_to_tile(m.end_tile_x, m.end_tile_y)
self._next_position = ((m.px, m.py), dir, "throwing", None)
end_tile = data.map.tiles[(m.end_tile_x, m.end_tile_y)]
self._next_snowball = (m.start_x, m.start_y, m.end_x, m.end_y, end_tile)
def handle_MWarmth(self, m):
self.warmth = m.warmth
def handle_MUnitIglooChange(self, m):
try:
self.igloo = data.buildings[m.building_id]
except KeyError:
pass
def handle_MUnitAddEmblem(self, m):
self.add_emblem(Emblem(m.image_name, m.animate, m.offset))
def handle_MUnitRemoveEmblem(self, m):
for e in list(self._emblems):
self.remove_emblem(e)
def move(self):
startxy = self.x, self.y
try_next_position = True
if self.state == "throwing" and self.current_frame < 9:
# We don't want to process a new state if we are still in a throw.
try_next_position = False
elif self.moving:
time_passed = data.get_ticks()-self.counter
self.offset = int(time_passed/self.walk_speed*32)
if self.offset < 32:
try_next_position = False
if try_next_position:
self.offset = 0
if self._next_position:
p, self.direction, self.state, self.holding = self._next_position
if (self.x, self.y) == p:
self.moving = False
else:
self.moving = True
self.x, self.y = p
self._next_posision = None
if self.state == "throwing" and self._next_snowball:
# We need to throw a snowball!
self.current_frame = 1
data.snowballs.add(snowball.Snowball(*self._next_snowball))
self._next_snowball = None
self.moving = False
if self.snowballs == 0:
self._next_position = ((self.x, self.y), self.direction,
"getting_snowball", None)
self.counter = data.get_ticks()
else:
self.moving = False
endxy = self.x, self.y
if startxy != endxy:
self.move_sprites(startxy, endxy)
class MasterIgloo(Igloo):
def __init__(self, *pargs, **kwargs):
self.observers = set()
Igloo.__init__(self, *pargs, **kwargs)
def _set_player(self, player):
if player != self.player:
Igloo._set_player(self, player)
send(MBuildingOwner(self.building_id, player.player_id))
player = property(Igloo._get_player, _set_player)
def _set_jeopardy(self, j):
self._jeopardy = j
if hasattr(self, 'building_id'):
send(MBuildingJeopardy(self.building_id, j,
self.player_taking_over.player_id))
jeopardy = property((lambda self:self._jeopardy), _set_jeopardy)
def storage_changed(self):
Igloo.storage_changed(self)
m = MBuildingStorage(self.building_id, self.num_storage('fish'),
self.num_storage('crystal'))
send(m)
class SlaveIgloo(Igloo):
def handle_MBuildingOwner(self, m):
# This will change the player of all the units too.
if m.player_id in data.players:
p = data.players[m.player_id]
if p != self.player:
self.player = p
self.region.player = p
def handle_MBuildingJeopardy(self, m):
self.jeopardy = bool(m.jeopardy)
self.player_taking_over = data.players[m.player_taking_over_id]
self.jeopardy_count = data.get_ticks()
def handle_MBuildingStorage(self, m):
self._storage['fish'] = m.fish
self._storage['crystal'] = m.crystal
self.storage_changed()
from plugin import Emblem, DynamicMapObject, BaseDynamicMapObject
class SlaveDynamicMapObject(BaseDynamicMapObject):
def handle_MDMOHidden(self, m):
self.hidden = bool(m.hidden)
| pack | identifier_name |
networking.py | from __future__ import division
import struct
from cStringIO import StringIO
#import pyraknet
#from pyraknet import PacketTypes, PacketReliability, PacketPriority
import socket, traceback, os, sys, threading
from Queue import Queue
from unit import Unit
from buildings.igloo import Igloo
from player import Player
import data, snowball
# This will be assigned to the pyraknet Peer instance.
net = None
def send(message, to="*", exclude=()):
"""
This is mostly a shortcut to send a message over the network.
It will fail silently if there is no network connection.
"""
if net:
if isinstance(net, Server):
net.send(message, to, exclude)
else:
net.send(message)
unit_states = ["idle", "walking", "frozen", "throwing", "gathering", "entering",
"inbuilding", "unloading", "getting_snowball", "stray"]
holding_types = [None, "fish", "crystal"]
class Connection(object):
def __init__(self, sock, recv_queue=None):
self.sock = sock
self.send_queue = Queue() # Strings
if recv_queue:
self.recv_queue = recv_queue
else:
self.recv_queue = Queue() # Message objects
# recv_queue might be reassigned in another thread. (e.g., the server
# might want all connections so share a single queue after they have
# been authenticated. Thus, this lock should be held before accessing
# or reassigning the queue. (Unless if it is being accessed in the
# only thread it is ever reassigned in.)
self.recv_queue_lock = threading.Lock()
self.send_thread = threading.Thread(target=self.send_loop)
self.send_thread.setDaemon(1)
self.recv_thread = threading.Thread(target=self.receive_loop)
self.recv_thread.setDaemon(1)
self.send_thread.start()
self.recv_thread.start()
def replace_recv_queue(self, new_queue):
"""
This will assign a new queue for receiving messages for this connection.
All required locking will be done, and any messages left in the old
queue will be added to the new one.
"""
self.recv_queue_lock.acquire()
while not self.recv_queue.empty():
new_queue.put(self.recv_queue.get(True), True)
self.recv_queue = new_queue
self.recv_queue_lock.release()
def send_loop(self):
try:
#bytes = 0
#last_print = 0
while True:
d = self.send_queue.get(True)
self.sock.sendall(d)
#bytes += len(d)
#if data.get_ticks() > last_print + 1000:
#last_print = data.get_ticks()
#print "sent kB:", bytes/1000.
#bytes = 0
except:
print "****** Exception while sending: ****************"
traceback.print_exc()
self.sock.close()
def receive_loop(self):
try:
while True:
type_id = struct.unpack("!B", self.read(1))[0]
cls = indexed_message_classes[type_id]
m = cls.from_stream(self)
m.connection = self
self.recv_queue_lock.acquire()
self.recv_queue.put(m, True)
self.recv_queue_lock.release()
#print "*** Received ****", m
except:
print "****** Exception while receiving: **************"
traceback.print_exc()
self.sock.close()
def send(self, message):
"""
Queues a message up for sending.
Takes a single argument, ``message``, which can either be a ``Message``
object or a string.
"""
if isinstance(message, basestring):
self.send_queue.put(message)
else:
self.send_queue.put(struct.pack("!B", message.type_id) +
message.pack())
def read(self, count):
"""
Reads exactly ``count`` bytes from the socket.
"""
d = self.sock.recv(count, socket.MSG_WAITALL)
assert len(d) == count
return d
class Server(object):
def __init__(self, port):
self.connections = set()
self.recv_queue = Queue()
self.port = port
def start(self):
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.setDaemon(1)
self.listen_thread.start()
def listen(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", self.port))
s.listen(1)
while True:
try:
clientsock, clientaddr = s.accept()
except:
traceback.print_exc()
continue
self.new_connection(Connection(clientsock))
def new_connection(self, connection):
self.connections.add(connection)
connection.replace_recv_queue(self.recv_queue)
def send(self, message, to="*", exclude=()):
if isinstance(message, Message):
message = struct.pack("!B", message.type_id) + message.pack()
if to == "*":
to = set(self.connections)
for connection in to:
if connection in exclude or not connection: continue
connection.send(message)
def send_chat_message(msg, from_player, to_player=None):
if to_player:
to_id = to_player.player_id
else:
to_id = 0xffff
m = MChatMessage(msg, from_player.player_id, to_id)
if data.THIS_IS_SERVER:
if to_player:
send(m, to=[to_player.connection])
else:
send(m, exclude=[from_player.connection])
else:
net.send(m)
class Writer(object):
def __init__(self, data=None):
if data is None:
data = StringIO()
self.data = data
def single(self, format, *values):
if format[0] not in "@=<>!":
format = "!"+format
self.data.write(struct.pack(format, *values))
def multi(self, format, values):
self.single("!H", len(values))
if len(format.strip("@=<>!")) > 1:
for v in values:
self.single(format, *v)
else:
for v in values:
self.single(format, v)
def string(self, s):
self.multi("s", s)
class Reader(object):
def __init__(self, data):
if not hasattr(data, "read"):
data = StringIO(data)
self.data = data
def single(self, format):
if format[0] not in "@=<>!":
format = "!"+format
return struct.unpack(format, self.data.read(struct.calcsize(format)))
def multi(self, format, limit=1000):
length = self.single("!H")[0]
if length > limit:
raise RuntimeError("multi length (%d) is above limit (%d)!!" %
(length, limit))
if len(format.strip("@=<>!")) > 1:
for i in range(length):
yield self.single(format)
else:
for i in range(length):
yield self.single(format)[0]
def string(self):
return "".join(self.multi("s"))
class Message(object):
type_id = None
struct_format = None
attrs = None
def __init__(self, *pargs, **kwargs):
attrs = list(self.attrs)
for arg in pargs:
n = attrs.pop(0)
setattr(self, n, arg)
for n in attrs:
setattr(self, n, kwargs.pop(n))
if kwargs:
raise TypeError("unexpected keyword argument '%s'" %
kwargs.keys()[0])
def values(self):
l = []
for a in self.attrs:
l.append(getattr(self, a))
return l
@classmethod
def from_stream(cls, stream):
res = struct.unpack(cls.struct_format,
stream.read(struct.calcsize(cls.struct_format)))
return cls(*res)
def pack(self):
return struct.pack(self.struct_format, *[getattr(self, n) for n in
self.attrs])
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__,
dict([(a, getattr(self, a)) for a in self.attrs]))
# Unit stuff
class MUnitPosition(Message):
type_id = 100
struct_format = "!HhhbbBB"
attrs = ('unit_id', 'x', 'y', 'dir_x', 'dir_y', 'state_id', 'holding_id')
# 101 is free
class MUnitAddEmblem(Message):
type_id = 102
attrs = ("unit_id", "image_name", "animate", "offset")
def pack(self):
w = Writer()
w.single("H", self.unit_id)
w.string(self.image_name)
w.single("B", self.animate)
w.single("hh", *self.offset)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), bool(r.single("B")),
r.single("hh"))
class MUnitRemoveEmblem(Message):
type_id = 103
struct_format = "!H"
attrs = ("unit_id",)
# TODO support removing only one emblem, instead of all.
class MWarmth(Message):
type_id = 104
struct_format = "!HB"
attrs = ("unit_id", "warmth")
class MSnowball(Message):
type_id = 105
struct_format = "!HIIIIHHBHH"
attrs = ("unit_id", "start_x", "start_y", "end_x", "end_y", "end_tile_x",
"end_tile_y", "snowballs_left", 'px', 'py')
class MUnitIglooChange(Message):
type_id = 109
struct_format = "!HH"
attrs = ("unit_id", "building_id")
# Building stuff
class MBuildingOwner(Message):
type_id = 110
struct_format = "!HH"
attrs = ('building_id', 'player_id')
class MBuildingJeopardy(Message):
type_id = 111
struct_format = "!HBH"
attrs = ('building_id', 'jeopardy', 'player_taking_over_id')
class MBuildingStorage(Message):
type_id = 112
struct_format = "!Hhh"
attrs = ("building_id", 'fish', 'crystal')
class MCreateDynamicMapObject(Message):
type_id = 114
attrs = ("dmo_id", "image_name", "position", "obstruction", "hidden",
"minimapimage")
def pack(self):
w = Writer()
w.single("H", self.dmo_id)
w.string(self.image_name)
w.single("HH", *self.position)
w.single("B", (self.hidden << 0) + (self.obstruction << 1))
w.string(self.minimapimage)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
dmo_id = r.single("H")[0]
image_name = r.string()
pos = r.single("HH")
v = r.single("B")[0] # Two bools are in one byte.
o = bool(v & 2) # second bit
h = bool(v & 1) # first bit
mini = r.string()
return cls(dmo_id, image_name, pos, o, h, mini)
class MDMOHidden(Message): | type_id = 116
# TODO
class MResourceQuantity(Message):
type_id = 117
struct_format = "!hhh"
attrs = ("tx", "ty", "q")
# Player/connection stuff
class MNewPlayer(Message):
type_id = 120
attrs = ("player_id", "name", "color", "loading")
def pack(self):
w = Writer()
w.single("H", self.player_id)
w.string(self.name)
w.single("BBB", *self.color)
w.single("B", self.loading)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), r.single("BBB"),
bool(r.single("B")[0]))
class MWhoYouAre(Message):
type_id = 121
struct_format = "!H"
attrs = ("player_id",)
class MSetJob(Message):
type_id = 122
attrs = ("pos", "run_mode", "unit_ids")
def pack(self):
w = Writer()
w.single("HH", *self.pos)
w.single("B", self.run_mode)
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
pos = r.single("HH")
run_mode = bool(r.single("B")[0])
unit_ids = list(r.multi("H"))
return cls(pos, run_mode, unit_ids)
class MDisbanUnits(Message):
type_id = 123
attrs = ("unit_ids",)
def pack(self):
w = Writer()
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(list(r.multi("H")))
class MClientFinishedLoading(Message):
type_id = 124
struct_format = "!H"
attrs = ("player_id",)
class MGameStart(Message):
type_id = 224
struct_format = ""
attrs = ()
class MChatMessage(Message):
type_id = 225
attrs = ("msg", "from_id", "to_id")
def pack(self):
w = Writer()
w.string(self.msg)
# to_id will be 0xffff to specify everyone.
w.single("HH", self.from_id, self.to_id)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string(), *r.single("HH"))
class MPlayerVictory(Message):
type_id = 226
attrs = ("player_id", "victory")
def pack(self):
return struct.pack("!Hs", self.player_id,
{True:'t', False:'f', None:'n'}[self.victory])
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0],
dict(t=True, f=False, n=None)[r.single("s")[0]])
class MMapname(Message):
type_id = 227
attrs = ("mapname",)
def pack(self):
w = Writer()
w.string(self.mapname)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string())
message_classes = [MUnitPosition, MBuildingOwner, MBuildingJeopardy,
MBuildingStorage, MSnowball, MWarmth, MNewPlayer, MWhoYouAre, MSetJob,
MDisbanUnits, MCreateDynamicMapObject, MDMOHidden, MDMOPosition,
MResourceQuantity, MUnitAddEmblem, MUnitRemoveEmblem, MGameStart,
MChatMessage, MUnitIglooChange, MClientFinishedLoading, MPlayerVictory,
MMapname]
indexed_message_classes = {}
for cls in message_classes:
indexed_message_classes[cls.type_id] = cls
class NetLoopBase(object):
def handle_network(self):
while not self.net.recv_queue.empty():
message = self.net.recv_queue.get()
if not self.handle_message(message):
print "Unhandled message: ", message
def handle_message(self, message):
attr = "handle_" + message.__class__.__name__
if hasattr(self, attr):
getattr(self, attr)(message)
return True
else:
return False
class MasterUnit(Unit):
def __init__(self, *pargs, **kwargs):
self.observers = []
Unit.__init__(self, *pargs, **kwargs)
self._last_state_sent = self.state
def _set_warmth(self, w):
self._warmth = w
if hasattr(self, "unit_id"):
send(MWarmth(self.unit_id, w))
warmth = property(lambda self:self._warmth, _set_warmth)
#def _set_max_warmth(self, mw):
#self._max_warmth = mw
#self.call_remote("update_max_warmth", mw)
#max_warmth = property(lambda self:self._max_warmth, _set_max_warmth)
def _set_igloo(self, i):
self._igloo = i
if hasattr(self, "unit_id") and hasattr(i, "building_id"):
send(MUnitIglooChange(self.unit_id, i.building_id))
def move(self):
pos = self.x, self.y
Unit.move(self)
if pos != (self.x, self.y) or (self.state != self._last_state_sent and
self.state not in ['throwing', 'getting_snowball']):
send(MUnitPosition(self.unit_id, self.x, self.y,
self.direction[0], self.direction[1],
unit_states.index(self.state),
holding_types.index(self.holding)))
self._last_state_sent = self.state
def throw_snowball(self, target):
s = Unit.throw_snowball(self, target)
send(MSnowball(self.unit_id, s.start_x, s.start_y, s.end_x, s.end_y,
s.end_tile.x, s.end_tile.y,
self.snowballs, self.x, self.y))
self._last_state_sent = "throwing"
return s
def add_emblem(self, emblem):
m = MUnitAddEmblem(self.unit_id, emblem.image_name, emblem.animate,
emblem.offset)
send(m)
Unit.add_emblem(self, emblem)
def remove_emblem(self, emblem):
if emblem in self._emblems:
send(MUnitRemoveEmblem(self.unit_id))
Unit.remove_emblem(self, emblem)
class SlaveUnit(Unit):
def __init__(self, *pargs, **kwargs):
Unit.__init__(self, *pargs, **kwargs)
# State of the unit will be set later.
self._next_position = None
self._next_snowball = None
def handle_MUnitPosition(self, m):
state = unit_states[m.state_id]
holding = holding_types[m.holding_id]
self._next_position = ((m.x, m.y), (m.dir_x, m.dir_y), state, holding)
def handle_MSnowball(self, m):
self.snowballs = m.snowballs_left
#self.current_frame = 1
dir = self.direction_to_tile(m.end_tile_x, m.end_tile_y)
self._next_position = ((m.px, m.py), dir, "throwing", None)
end_tile = data.map.tiles[(m.end_tile_x, m.end_tile_y)]
self._next_snowball = (m.start_x, m.start_y, m.end_x, m.end_y, end_tile)
def handle_MWarmth(self, m):
self.warmth = m.warmth
def handle_MUnitIglooChange(self, m):
try:
self.igloo = data.buildings[m.building_id]
except KeyError:
pass
def handle_MUnitAddEmblem(self, m):
self.add_emblem(Emblem(m.image_name, m.animate, m.offset))
def handle_MUnitRemoveEmblem(self, m):
for e in list(self._emblems):
self.remove_emblem(e)
def move(self):
startxy = self.x, self.y
try_next_position = True
if self.state == "throwing" and self.current_frame < 9:
# We don't want to process a new state if we are still in a throw.
try_next_position = False
elif self.moving:
time_passed = data.get_ticks()-self.counter
self.offset = int(time_passed/self.walk_speed*32)
if self.offset < 32:
try_next_position = False
if try_next_position:
self.offset = 0
if self._next_position:
p, self.direction, self.state, self.holding = self._next_position
if (self.x, self.y) == p:
self.moving = False
else:
self.moving = True
self.x, self.y = p
self._next_posision = None
if self.state == "throwing" and self._next_snowball:
# We need to throw a snowball!
self.current_frame = 1
data.snowballs.add(snowball.Snowball(*self._next_snowball))
self._next_snowball = None
self.moving = False
if self.snowballs == 0:
self._next_position = ((self.x, self.y), self.direction,
"getting_snowball", None)
self.counter = data.get_ticks()
else:
self.moving = False
endxy = self.x, self.y
if startxy != endxy:
self.move_sprites(startxy, endxy)
class MasterIgloo(Igloo):
def __init__(self, *pargs, **kwargs):
self.observers = set()
Igloo.__init__(self, *pargs, **kwargs)
def _set_player(self, player):
if player != self.player:
Igloo._set_player(self, player)
send(MBuildingOwner(self.building_id, player.player_id))
player = property(Igloo._get_player, _set_player)
def _set_jeopardy(self, j):
self._jeopardy = j
if hasattr(self, 'building_id'):
send(MBuildingJeopardy(self.building_id, j,
self.player_taking_over.player_id))
jeopardy = property((lambda self:self._jeopardy), _set_jeopardy)
def storage_changed(self):
Igloo.storage_changed(self)
m = MBuildingStorage(self.building_id, self.num_storage('fish'),
self.num_storage('crystal'))
send(m)
class SlaveIgloo(Igloo):
def handle_MBuildingOwner(self, m):
# This will change the player of all the units too.
if m.player_id in data.players:
p = data.players[m.player_id]
if p != self.player:
self.player = p
self.region.player = p
def handle_MBuildingJeopardy(self, m):
self.jeopardy = bool(m.jeopardy)
self.player_taking_over = data.players[m.player_taking_over_id]
self.jeopardy_count = data.get_ticks()
def handle_MBuildingStorage(self, m):
self._storage['fish'] = m.fish
self._storage['crystal'] = m.crystal
self.storage_changed()
from plugin import Emblem, DynamicMapObject, BaseDynamicMapObject
class SlaveDynamicMapObject(BaseDynamicMapObject):
def handle_MDMOHidden(self, m):
self.hidden = bool(m.hidden) | type_id = 115
struct_format = "!HB"
attrs = ("dmo_id", "hidden")
class MDMOPosition(Message): | random_line_split |
networking.py | from __future__ import division
import struct
from cStringIO import StringIO
#import pyraknet
#from pyraknet import PacketTypes, PacketReliability, PacketPriority
import socket, traceback, os, sys, threading
from Queue import Queue
from unit import Unit
from buildings.igloo import Igloo
from player import Player
import data, snowball
# This will be assigned to the pyraknet Peer instance.
net = None
def send(message, to="*", exclude=()):
"""
This is mostly a shortcut to send a message over the network.
It will fail silently if there is no network connection.
"""
if net:
if isinstance(net, Server):
net.send(message, to, exclude)
else:
net.send(message)
unit_states = ["idle", "walking", "frozen", "throwing", "gathering", "entering",
"inbuilding", "unloading", "getting_snowball", "stray"]
holding_types = [None, "fish", "crystal"]
class Connection(object):
def __init__(self, sock, recv_queue=None):
self.sock = sock
self.send_queue = Queue() # Strings
if recv_queue:
self.recv_queue = recv_queue
else:
self.recv_queue = Queue() # Message objects
# recv_queue might be reassigned in another thread. (e.g., the server
# might want all connections so share a single queue after they have
# been authenticated. Thus, this lock should be held before accessing
# or reassigning the queue. (Unless if it is being accessed in the
# only thread it is ever reassigned in.)
self.recv_queue_lock = threading.Lock()
self.send_thread = threading.Thread(target=self.send_loop)
self.send_thread.setDaemon(1)
self.recv_thread = threading.Thread(target=self.receive_loop)
self.recv_thread.setDaemon(1)
self.send_thread.start()
self.recv_thread.start()
def replace_recv_queue(self, new_queue):
"""
This will assign a new queue for receiving messages for this connection.
All required locking will be done, and any messages left in the old
queue will be added to the new one.
"""
self.recv_queue_lock.acquire()
while not self.recv_queue.empty():
new_queue.put(self.recv_queue.get(True), True)
self.recv_queue = new_queue
self.recv_queue_lock.release()
def send_loop(self):
try:
#bytes = 0
#last_print = 0
while True:
d = self.send_queue.get(True)
self.sock.sendall(d)
#bytes += len(d)
#if data.get_ticks() > last_print + 1000:
#last_print = data.get_ticks()
#print "sent kB:", bytes/1000.
#bytes = 0
except:
print "****** Exception while sending: ****************"
traceback.print_exc()
self.sock.close()
def receive_loop(self):
try:
while True:
type_id = struct.unpack("!B", self.read(1))[0]
cls = indexed_message_classes[type_id]
m = cls.from_stream(self)
m.connection = self
self.recv_queue_lock.acquire()
self.recv_queue.put(m, True)
self.recv_queue_lock.release()
#print "*** Received ****", m
except:
print "****** Exception while receiving: **************"
traceback.print_exc()
self.sock.close()
def send(self, message):
"""
Queues a message up for sending.
Takes a single argument, ``message``, which can either be a ``Message``
object or a string.
"""
if isinstance(message, basestring):
self.send_queue.put(message)
else:
self.send_queue.put(struct.pack("!B", message.type_id) +
message.pack())
def read(self, count):
"""
Reads exactly ``count`` bytes from the socket.
"""
d = self.sock.recv(count, socket.MSG_WAITALL)
assert len(d) == count
return d
class Server(object):
def __init__(self, port):
self.connections = set()
self.recv_queue = Queue()
self.port = port
def start(self):
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.setDaemon(1)
self.listen_thread.start()
def listen(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", self.port))
s.listen(1)
while True:
try:
clientsock, clientaddr = s.accept()
except:
traceback.print_exc()
continue
self.new_connection(Connection(clientsock))
def new_connection(self, connection):
self.connections.add(connection)
connection.replace_recv_queue(self.recv_queue)
def send(self, message, to="*", exclude=()):
if isinstance(message, Message):
message = struct.pack("!B", message.type_id) + message.pack()
if to == "*":
to = set(self.connections)
for connection in to:
if connection in exclude or not connection: continue
connection.send(message)
def send_chat_message(msg, from_player, to_player=None):
if to_player:
to_id = to_player.player_id
else:
to_id = 0xffff
m = MChatMessage(msg, from_player.player_id, to_id)
if data.THIS_IS_SERVER:
if to_player:
send(m, to=[to_player.connection])
else:
send(m, exclude=[from_player.connection])
else:
net.send(m)
class Writer(object):
def __init__(self, data=None):
if data is None:
data = StringIO()
self.data = data
def single(self, format, *values):
if format[0] not in "@=<>!":
|
self.data.write(struct.pack(format, *values))
def multi(self, format, values):
self.single("!H", len(values))
if len(format.strip("@=<>!")) > 1:
for v in values:
self.single(format, *v)
else:
for v in values:
self.single(format, v)
def string(self, s):
self.multi("s", s)
class Reader(object):
def __init__(self, data):
if not hasattr(data, "read"):
data = StringIO(data)
self.data = data
def single(self, format):
if format[0] not in "@=<>!":
format = "!"+format
return struct.unpack(format, self.data.read(struct.calcsize(format)))
def multi(self, format, limit=1000):
length = self.single("!H")[0]
if length > limit:
raise RuntimeError("multi length (%d) is above limit (%d)!!" %
(length, limit))
if len(format.strip("@=<>!")) > 1:
for i in range(length):
yield self.single(format)
else:
for i in range(length):
yield self.single(format)[0]
def string(self):
return "".join(self.multi("s"))
class Message(object):
type_id = None
struct_format = None
attrs = None
def __init__(self, *pargs, **kwargs):
attrs = list(self.attrs)
for arg in pargs:
n = attrs.pop(0)
setattr(self, n, arg)
for n in attrs:
setattr(self, n, kwargs.pop(n))
if kwargs:
raise TypeError("unexpected keyword argument '%s'" %
kwargs.keys()[0])
def values(self):
l = []
for a in self.attrs:
l.append(getattr(self, a))
return l
@classmethod
def from_stream(cls, stream):
res = struct.unpack(cls.struct_format,
stream.read(struct.calcsize(cls.struct_format)))
return cls(*res)
def pack(self):
return struct.pack(self.struct_format, *[getattr(self, n) for n in
self.attrs])
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__,
dict([(a, getattr(self, a)) for a in self.attrs]))
# Unit stuff
class MUnitPosition(Message):
type_id = 100
struct_format = "!HhhbbBB"
attrs = ('unit_id', 'x', 'y', 'dir_x', 'dir_y', 'state_id', 'holding_id')
# 101 is free
class MUnitAddEmblem(Message):
type_id = 102
attrs = ("unit_id", "image_name", "animate", "offset")
def pack(self):
w = Writer()
w.single("H", self.unit_id)
w.string(self.image_name)
w.single("B", self.animate)
w.single("hh", *self.offset)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), bool(r.single("B")),
r.single("hh"))
class MUnitRemoveEmblem(Message):
type_id = 103
struct_format = "!H"
attrs = ("unit_id",)
# TODO support removing only one emblem, instead of all.
class MWarmth(Message):
type_id = 104
struct_format = "!HB"
attrs = ("unit_id", "warmth")
class MSnowball(Message):
type_id = 105
struct_format = "!HIIIIHHBHH"
attrs = ("unit_id", "start_x", "start_y", "end_x", "end_y", "end_tile_x",
"end_tile_y", "snowballs_left", 'px', 'py')
class MUnitIglooChange(Message):
type_id = 109
struct_format = "!HH"
attrs = ("unit_id", "building_id")
# Building stuff
class MBuildingOwner(Message):
type_id = 110
struct_format = "!HH"
attrs = ('building_id', 'player_id')
class MBuildingJeopardy(Message):
type_id = 111
struct_format = "!HBH"
attrs = ('building_id', 'jeopardy', 'player_taking_over_id')
class MBuildingStorage(Message):
type_id = 112
struct_format = "!Hhh"
attrs = ("building_id", 'fish', 'crystal')
class MCreateDynamicMapObject(Message):
type_id = 114
attrs = ("dmo_id", "image_name", "position", "obstruction", "hidden",
"minimapimage")
def pack(self):
w = Writer()
w.single("H", self.dmo_id)
w.string(self.image_name)
w.single("HH", *self.position)
w.single("B", (self.hidden << 0) + (self.obstruction << 1))
w.string(self.minimapimage)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
dmo_id = r.single("H")[0]
image_name = r.string()
pos = r.single("HH")
v = r.single("B")[0] # Two bools are in one byte.
o = bool(v & 2) # second bit
h = bool(v & 1) # first bit
mini = r.string()
return cls(dmo_id, image_name, pos, o, h, mini)
class MDMOHidden(Message):
type_id = 115
struct_format = "!HB"
attrs = ("dmo_id", "hidden")
class MDMOPosition(Message):
type_id = 116
# TODO
class MResourceQuantity(Message):
type_id = 117
struct_format = "!hhh"
attrs = ("tx", "ty", "q")
# Player/connection stuff
class MNewPlayer(Message):
type_id = 120
attrs = ("player_id", "name", "color", "loading")
def pack(self):
w = Writer()
w.single("H", self.player_id)
w.string(self.name)
w.single("BBB", *self.color)
w.single("B", self.loading)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), r.single("BBB"),
bool(r.single("B")[0]))
class MWhoYouAre(Message):
type_id = 121
struct_format = "!H"
attrs = ("player_id",)
class MSetJob(Message):
type_id = 122
attrs = ("pos", "run_mode", "unit_ids")
def pack(self):
w = Writer()
w.single("HH", *self.pos)
w.single("B", self.run_mode)
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
pos = r.single("HH")
run_mode = bool(r.single("B")[0])
unit_ids = list(r.multi("H"))
return cls(pos, run_mode, unit_ids)
class MDisbanUnits(Message):
type_id = 123
attrs = ("unit_ids",)
def pack(self):
w = Writer()
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(list(r.multi("H")))
class MClientFinishedLoading(Message):
type_id = 124
struct_format = "!H"
attrs = ("player_id",)
class MGameStart(Message):
type_id = 224
struct_format = ""
attrs = ()
class MChatMessage(Message):
type_id = 225
attrs = ("msg", "from_id", "to_id")
def pack(self):
w = Writer()
w.string(self.msg)
# to_id will be 0xffff to specify everyone.
w.single("HH", self.from_id, self.to_id)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string(), *r.single("HH"))
class MPlayerVictory(Message):
type_id = 226
attrs = ("player_id", "victory")
def pack(self):
return struct.pack("!Hs", self.player_id,
{True:'t', False:'f', None:'n'}[self.victory])
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0],
dict(t=True, f=False, n=None)[r.single("s")[0]])
class MMapname(Message):
type_id = 227
attrs = ("mapname",)
def pack(self):
w = Writer()
w.string(self.mapname)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string())
message_classes = [MUnitPosition, MBuildingOwner, MBuildingJeopardy,
MBuildingStorage, MSnowball, MWarmth, MNewPlayer, MWhoYouAre, MSetJob,
MDisbanUnits, MCreateDynamicMapObject, MDMOHidden, MDMOPosition,
MResourceQuantity, MUnitAddEmblem, MUnitRemoveEmblem, MGameStart,
MChatMessage, MUnitIglooChange, MClientFinishedLoading, MPlayerVictory,
MMapname]
indexed_message_classes = {}
for cls in message_classes:
indexed_message_classes[cls.type_id] = cls
class NetLoopBase(object):
def handle_network(self):
while not self.net.recv_queue.empty():
message = self.net.recv_queue.get()
if not self.handle_message(message):
print "Unhandled message: ", message
def handle_message(self, message):
attr = "handle_" + message.__class__.__name__
if hasattr(self, attr):
getattr(self, attr)(message)
return True
else:
return False
class MasterUnit(Unit):
def __init__(self, *pargs, **kwargs):
self.observers = []
Unit.__init__(self, *pargs, **kwargs)
self._last_state_sent = self.state
def _set_warmth(self, w):
self._warmth = w
if hasattr(self, "unit_id"):
send(MWarmth(self.unit_id, w))
warmth = property(lambda self:self._warmth, _set_warmth)
#def _set_max_warmth(self, mw):
#self._max_warmth = mw
#self.call_remote("update_max_warmth", mw)
#max_warmth = property(lambda self:self._max_warmth, _set_max_warmth)
def _set_igloo(self, i):
self._igloo = i
if hasattr(self, "unit_id") and hasattr(i, "building_id"):
send(MUnitIglooChange(self.unit_id, i.building_id))
def move(self):
pos = self.x, self.y
Unit.move(self)
if pos != (self.x, self.y) or (self.state != self._last_state_sent and
self.state not in ['throwing', 'getting_snowball']):
send(MUnitPosition(self.unit_id, self.x, self.y,
self.direction[0], self.direction[1],
unit_states.index(self.state),
holding_types.index(self.holding)))
self._last_state_sent = self.state
def throw_snowball(self, target):
s = Unit.throw_snowball(self, target)
send(MSnowball(self.unit_id, s.start_x, s.start_y, s.end_x, s.end_y,
s.end_tile.x, s.end_tile.y,
self.snowballs, self.x, self.y))
self._last_state_sent = "throwing"
return s
def add_emblem(self, emblem):
m = MUnitAddEmblem(self.unit_id, emblem.image_name, emblem.animate,
emblem.offset)
send(m)
Unit.add_emblem(self, emblem)
def remove_emblem(self, emblem):
if emblem in self._emblems:
send(MUnitRemoveEmblem(self.unit_id))
Unit.remove_emblem(self, emblem)
class SlaveUnit(Unit):
def __init__(self, *pargs, **kwargs):
Unit.__init__(self, *pargs, **kwargs)
# State of the unit will be set later.
self._next_position = None
self._next_snowball = None
def handle_MUnitPosition(self, m):
state = unit_states[m.state_id]
holding = holding_types[m.holding_id]
self._next_position = ((m.x, m.y), (m.dir_x, m.dir_y), state, holding)
def handle_MSnowball(self, m):
self.snowballs = m.snowballs_left
#self.current_frame = 1
dir = self.direction_to_tile(m.end_tile_x, m.end_tile_y)
self._next_position = ((m.px, m.py), dir, "throwing", None)
end_tile = data.map.tiles[(m.end_tile_x, m.end_tile_y)]
self._next_snowball = (m.start_x, m.start_y, m.end_x, m.end_y, end_tile)
def handle_MWarmth(self, m):
self.warmth = m.warmth
def handle_MUnitIglooChange(self, m):
try:
self.igloo = data.buildings[m.building_id]
except KeyError:
pass
def handle_MUnitAddEmblem(self, m):
self.add_emblem(Emblem(m.image_name, m.animate, m.offset))
def handle_MUnitRemoveEmblem(self, m):
for e in list(self._emblems):
self.remove_emblem(e)
def move(self):
startxy = self.x, self.y
try_next_position = True
if self.state == "throwing" and self.current_frame < 9:
# We don't want to process a new state if we are still in a throw.
try_next_position = False
elif self.moving:
time_passed = data.get_ticks()-self.counter
self.offset = int(time_passed/self.walk_speed*32)
if self.offset < 32:
try_next_position = False
if try_next_position:
self.offset = 0
if self._next_position:
p, self.direction, self.state, self.holding = self._next_position
if (self.x, self.y) == p:
self.moving = False
else:
self.moving = True
self.x, self.y = p
self._next_posision = None
if self.state == "throwing" and self._next_snowball:
# We need to throw a snowball!
self.current_frame = 1
data.snowballs.add(snowball.Snowball(*self._next_snowball))
self._next_snowball = None
self.moving = False
if self.snowballs == 0:
self._next_position = ((self.x, self.y), self.direction,
"getting_snowball", None)
self.counter = data.get_ticks()
else:
self.moving = False
endxy = self.x, self.y
if startxy != endxy:
self.move_sprites(startxy, endxy)
class MasterIgloo(Igloo):
def __init__(self, *pargs, **kwargs):
self.observers = set()
Igloo.__init__(self, *pargs, **kwargs)
def _set_player(self, player):
if player != self.player:
Igloo._set_player(self, player)
send(MBuildingOwner(self.building_id, player.player_id))
player = property(Igloo._get_player, _set_player)
def _set_jeopardy(self, j):
self._jeopardy = j
if hasattr(self, 'building_id'):
send(MBuildingJeopardy(self.building_id, j,
self.player_taking_over.player_id))
jeopardy = property((lambda self:self._jeopardy), _set_jeopardy)
def storage_changed(self):
Igloo.storage_changed(self)
m = MBuildingStorage(self.building_id, self.num_storage('fish'),
self.num_storage('crystal'))
send(m)
class SlaveIgloo(Igloo):
def handle_MBuildingOwner(self, m):
# This will change the player of all the units too.
if m.player_id in data.players:
p = data.players[m.player_id]
if p != self.player:
self.player = p
self.region.player = p
def handle_MBuildingJeopardy(self, m):
self.jeopardy = bool(m.jeopardy)
self.player_taking_over = data.players[m.player_taking_over_id]
self.jeopardy_count = data.get_ticks()
def handle_MBuildingStorage(self, m):
self._storage['fish'] = m.fish
self._storage['crystal'] = m.crystal
self.storage_changed()
from plugin import Emblem, DynamicMapObject, BaseDynamicMapObject
class SlaveDynamicMapObject(BaseDynamicMapObject):
def handle_MDMOHidden(self, m):
self.hidden = bool(m.hidden)
| format = "!"+format | conditional_block |
networking.py | from __future__ import division
import struct
from cStringIO import StringIO
#import pyraknet
#from pyraknet import PacketTypes, PacketReliability, PacketPriority
import socket, traceback, os, sys, threading
from Queue import Queue
from unit import Unit
from buildings.igloo import Igloo
from player import Player
import data, snowball
# This will be assigned to the pyraknet Peer instance.
net = None
def send(message, to="*", exclude=()):
"""
This is mostly a shortcut to send a message over the network.
It will fail silently if there is no network connection.
"""
if net:
if isinstance(net, Server):
net.send(message, to, exclude)
else:
net.send(message)
unit_states = ["idle", "walking", "frozen", "throwing", "gathering", "entering",
"inbuilding", "unloading", "getting_snowball", "stray"]
holding_types = [None, "fish", "crystal"]
class Connection(object):
def __init__(self, sock, recv_queue=None):
self.sock = sock
self.send_queue = Queue() # Strings
if recv_queue:
self.recv_queue = recv_queue
else:
self.recv_queue = Queue() # Message objects
# recv_queue might be reassigned in another thread. (e.g., the server
# might want all connections so share a single queue after they have
# been authenticated. Thus, this lock should be held before accessing
# or reassigning the queue. (Unless if it is being accessed in the
# only thread it is ever reassigned in.)
self.recv_queue_lock = threading.Lock()
self.send_thread = threading.Thread(target=self.send_loop)
self.send_thread.setDaemon(1)
self.recv_thread = threading.Thread(target=self.receive_loop)
self.recv_thread.setDaemon(1)
self.send_thread.start()
self.recv_thread.start()
def replace_recv_queue(self, new_queue):
"""
This will assign a new queue for receiving messages for this connection.
All required locking will be done, and any messages left in the old
queue will be added to the new one.
"""
self.recv_queue_lock.acquire()
while not self.recv_queue.empty():
new_queue.put(self.recv_queue.get(True), True)
self.recv_queue = new_queue
self.recv_queue_lock.release()
def send_loop(self):
try:
#bytes = 0
#last_print = 0
while True:
d = self.send_queue.get(True)
self.sock.sendall(d)
#bytes += len(d)
#if data.get_ticks() > last_print + 1000:
#last_print = data.get_ticks()
#print "sent kB:", bytes/1000.
#bytes = 0
except:
print "****** Exception while sending: ****************"
traceback.print_exc()
self.sock.close()
def receive_loop(self):
try:
while True:
type_id = struct.unpack("!B", self.read(1))[0]
cls = indexed_message_classes[type_id]
m = cls.from_stream(self)
m.connection = self
self.recv_queue_lock.acquire()
self.recv_queue.put(m, True)
self.recv_queue_lock.release()
#print "*** Received ****", m
except:
print "****** Exception while receiving: **************"
traceback.print_exc()
self.sock.close()
def send(self, message):
"""
Queues a message up for sending.
Takes a single argument, ``message``, which can either be a ``Message``
object or a string.
"""
if isinstance(message, basestring):
self.send_queue.put(message)
else:
self.send_queue.put(struct.pack("!B", message.type_id) +
message.pack())
def read(self, count):
"""
Reads exactly ``count`` bytes from the socket.
"""
d = self.sock.recv(count, socket.MSG_WAITALL)
assert len(d) == count
return d
class Server(object):
def __init__(self, port):
self.connections = set()
self.recv_queue = Queue()
self.port = port
def start(self):
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.setDaemon(1)
self.listen_thread.start()
def listen(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", self.port))
s.listen(1)
while True:
try:
clientsock, clientaddr = s.accept()
except:
traceback.print_exc()
continue
self.new_connection(Connection(clientsock))
def new_connection(self, connection):
self.connections.add(connection)
connection.replace_recv_queue(self.recv_queue)
def send(self, message, to="*", exclude=()):
if isinstance(message, Message):
message = struct.pack("!B", message.type_id) + message.pack()
if to == "*":
to = set(self.connections)
for connection in to:
if connection in exclude or not connection: continue
connection.send(message)
def send_chat_message(msg, from_player, to_player=None):
if to_player:
to_id = to_player.player_id
else:
to_id = 0xffff
m = MChatMessage(msg, from_player.player_id, to_id)
if data.THIS_IS_SERVER:
if to_player:
send(m, to=[to_player.connection])
else:
send(m, exclude=[from_player.connection])
else:
net.send(m)
class Writer(object):
def __init__(self, data=None):
if data is None:
data = StringIO()
self.data = data
def single(self, format, *values):
if format[0] not in "@=<>!":
format = "!"+format
self.data.write(struct.pack(format, *values))
def multi(self, format, values):
self.single("!H", len(values))
if len(format.strip("@=<>!")) > 1:
for v in values:
self.single(format, *v)
else:
for v in values:
self.single(format, v)
def string(self, s):
self.multi("s", s)
class Reader(object):
def __init__(self, data):
if not hasattr(data, "read"):
data = StringIO(data)
self.data = data
def single(self, format):
if format[0] not in "@=<>!":
format = "!"+format
return struct.unpack(format, self.data.read(struct.calcsize(format)))
def multi(self, format, limit=1000):
length = self.single("!H")[0]
if length > limit:
raise RuntimeError("multi length (%d) is above limit (%d)!!" %
(length, limit))
if len(format.strip("@=<>!")) > 1:
for i in range(length):
yield self.single(format)
else:
for i in range(length):
yield self.single(format)[0]
def string(self):
return "".join(self.multi("s"))
class Message(object):
type_id = None
struct_format = None
attrs = None
def __init__(self, *pargs, **kwargs):
|
def values(self):
l = []
for a in self.attrs:
l.append(getattr(self, a))
return l
@classmethod
def from_stream(cls, stream):
res = struct.unpack(cls.struct_format,
stream.read(struct.calcsize(cls.struct_format)))
return cls(*res)
def pack(self):
return struct.pack(self.struct_format, *[getattr(self, n) for n in
self.attrs])
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__,
dict([(a, getattr(self, a)) for a in self.attrs]))
# Unit stuff
class MUnitPosition(Message):
type_id = 100
struct_format = "!HhhbbBB"
attrs = ('unit_id', 'x', 'y', 'dir_x', 'dir_y', 'state_id', 'holding_id')
# 101 is free
class MUnitAddEmblem(Message):
type_id = 102
attrs = ("unit_id", "image_name", "animate", "offset")
def pack(self):
w = Writer()
w.single("H", self.unit_id)
w.string(self.image_name)
w.single("B", self.animate)
w.single("hh", *self.offset)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), bool(r.single("B")),
r.single("hh"))
class MUnitRemoveEmblem(Message):
type_id = 103
struct_format = "!H"
attrs = ("unit_id",)
# TODO support removing only one emblem, instead of all.
class MWarmth(Message):
type_id = 104
struct_format = "!HB"
attrs = ("unit_id", "warmth")
class MSnowball(Message):
type_id = 105
struct_format = "!HIIIIHHBHH"
attrs = ("unit_id", "start_x", "start_y", "end_x", "end_y", "end_tile_x",
"end_tile_y", "snowballs_left", 'px', 'py')
class MUnitIglooChange(Message):
type_id = 109
struct_format = "!HH"
attrs = ("unit_id", "building_id")
# Building stuff
class MBuildingOwner(Message):
type_id = 110
struct_format = "!HH"
attrs = ('building_id', 'player_id')
class MBuildingJeopardy(Message):
type_id = 111
struct_format = "!HBH"
attrs = ('building_id', 'jeopardy', 'player_taking_over_id')
class MBuildingStorage(Message):
type_id = 112
struct_format = "!Hhh"
attrs = ("building_id", 'fish', 'crystal')
class MCreateDynamicMapObject(Message):
type_id = 114
attrs = ("dmo_id", "image_name", "position", "obstruction", "hidden",
"minimapimage")
def pack(self):
w = Writer()
w.single("H", self.dmo_id)
w.string(self.image_name)
w.single("HH", *self.position)
w.single("B", (self.hidden << 0) + (self.obstruction << 1))
w.string(self.minimapimage)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
dmo_id = r.single("H")[0]
image_name = r.string()
pos = r.single("HH")
v = r.single("B")[0] # Two bools are in one byte.
o = bool(v & 2) # second bit
h = bool(v & 1) # first bit
mini = r.string()
return cls(dmo_id, image_name, pos, o, h, mini)
class MDMOHidden(Message):
type_id = 115
struct_format = "!HB"
attrs = ("dmo_id", "hidden")
class MDMOPosition(Message):
type_id = 116
# TODO
class MResourceQuantity(Message):
type_id = 117
struct_format = "!hhh"
attrs = ("tx", "ty", "q")
# Player/connection stuff
class MNewPlayer(Message):
type_id = 120
attrs = ("player_id", "name", "color", "loading")
def pack(self):
w = Writer()
w.single("H", self.player_id)
w.string(self.name)
w.single("BBB", *self.color)
w.single("B", self.loading)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0], r.string(), r.single("BBB"),
bool(r.single("B")[0]))
class MWhoYouAre(Message):
type_id = 121
struct_format = "!H"
attrs = ("player_id",)
class MSetJob(Message):
type_id = 122
attrs = ("pos", "run_mode", "unit_ids")
def pack(self):
w = Writer()
w.single("HH", *self.pos)
w.single("B", self.run_mode)
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
pos = r.single("HH")
run_mode = bool(r.single("B")[0])
unit_ids = list(r.multi("H"))
return cls(pos, run_mode, unit_ids)
class MDisbanUnits(Message):
type_id = 123
attrs = ("unit_ids",)
def pack(self):
w = Writer()
w.multi("H", self.unit_ids)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(list(r.multi("H")))
class MClientFinishedLoading(Message):
type_id = 124
struct_format = "!H"
attrs = ("player_id",)
class MGameStart(Message):
type_id = 224
struct_format = ""
attrs = ()
class MChatMessage(Message):
type_id = 225
attrs = ("msg", "from_id", "to_id")
def pack(self):
w = Writer()
w.string(self.msg)
# to_id will be 0xffff to specify everyone.
w.single("HH", self.from_id, self.to_id)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string(), *r.single("HH"))
class MPlayerVictory(Message):
type_id = 226
attrs = ("player_id", "victory")
def pack(self):
return struct.pack("!Hs", self.player_id,
{True:'t', False:'f', None:'n'}[self.victory])
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.single("H")[0],
dict(t=True, f=False, n=None)[r.single("s")[0]])
class MMapname(Message):
type_id = 227
attrs = ("mapname",)
def pack(self):
w = Writer()
w.string(self.mapname)
return w.data.getvalue()
@classmethod
def from_stream(cls, stream):
r = Reader(stream)
return cls(r.string())
message_classes = [MUnitPosition, MBuildingOwner, MBuildingJeopardy,
MBuildingStorage, MSnowball, MWarmth, MNewPlayer, MWhoYouAre, MSetJob,
MDisbanUnits, MCreateDynamicMapObject, MDMOHidden, MDMOPosition,
MResourceQuantity, MUnitAddEmblem, MUnitRemoveEmblem, MGameStart,
MChatMessage, MUnitIglooChange, MClientFinishedLoading, MPlayerVictory,
MMapname]
indexed_message_classes = {}
for cls in message_classes:
indexed_message_classes[cls.type_id] = cls
class NetLoopBase(object):
def handle_network(self):
while not self.net.recv_queue.empty():
message = self.net.recv_queue.get()
if not self.handle_message(message):
print "Unhandled message: ", message
def handle_message(self, message):
attr = "handle_" + message.__class__.__name__
if hasattr(self, attr):
getattr(self, attr)(message)
return True
else:
return False
class MasterUnit(Unit):
def __init__(self, *pargs, **kwargs):
self.observers = []
Unit.__init__(self, *pargs, **kwargs)
self._last_state_sent = self.state
def _set_warmth(self, w):
self._warmth = w
if hasattr(self, "unit_id"):
send(MWarmth(self.unit_id, w))
warmth = property(lambda self:self._warmth, _set_warmth)
#def _set_max_warmth(self, mw):
#self._max_warmth = mw
#self.call_remote("update_max_warmth", mw)
#max_warmth = property(lambda self:self._max_warmth, _set_max_warmth)
def _set_igloo(self, i):
self._igloo = i
if hasattr(self, "unit_id") and hasattr(i, "building_id"):
send(MUnitIglooChange(self.unit_id, i.building_id))
def move(self):
pos = self.x, self.y
Unit.move(self)
if pos != (self.x, self.y) or (self.state != self._last_state_sent and
self.state not in ['throwing', 'getting_snowball']):
send(MUnitPosition(self.unit_id, self.x, self.y,
self.direction[0], self.direction[1],
unit_states.index(self.state),
holding_types.index(self.holding)))
self._last_state_sent = self.state
def throw_snowball(self, target):
s = Unit.throw_snowball(self, target)
send(MSnowball(self.unit_id, s.start_x, s.start_y, s.end_x, s.end_y,
s.end_tile.x, s.end_tile.y,
self.snowballs, self.x, self.y))
self._last_state_sent = "throwing"
return s
def add_emblem(self, emblem):
m = MUnitAddEmblem(self.unit_id, emblem.image_name, emblem.animate,
emblem.offset)
send(m)
Unit.add_emblem(self, emblem)
def remove_emblem(self, emblem):
if emblem in self._emblems:
send(MUnitRemoveEmblem(self.unit_id))
Unit.remove_emblem(self, emblem)
class SlaveUnit(Unit):
def __init__(self, *pargs, **kwargs):
Unit.__init__(self, *pargs, **kwargs)
# State of the unit will be set later.
self._next_position = None
self._next_snowball = None
def handle_MUnitPosition(self, m):
state = unit_states[m.state_id]
holding = holding_types[m.holding_id]
self._next_position = ((m.x, m.y), (m.dir_x, m.dir_y), state, holding)
def handle_MSnowball(self, m):
self.snowballs = m.snowballs_left
#self.current_frame = 1
dir = self.direction_to_tile(m.end_tile_x, m.end_tile_y)
self._next_position = ((m.px, m.py), dir, "throwing", None)
end_tile = data.map.tiles[(m.end_tile_x, m.end_tile_y)]
self._next_snowball = (m.start_x, m.start_y, m.end_x, m.end_y, end_tile)
def handle_MWarmth(self, m):
self.warmth = m.warmth
def handle_MUnitIglooChange(self, m):
try:
self.igloo = data.buildings[m.building_id]
except KeyError:
pass
def handle_MUnitAddEmblem(self, m):
self.add_emblem(Emblem(m.image_name, m.animate, m.offset))
def handle_MUnitRemoveEmblem(self, m):
for e in list(self._emblems):
self.remove_emblem(e)
def move(self):
startxy = self.x, self.y
try_next_position = True
if self.state == "throwing" and self.current_frame < 9:
# We don't want to process a new state if we are still in a throw.
try_next_position = False
elif self.moving:
time_passed = data.get_ticks()-self.counter
self.offset = int(time_passed/self.walk_speed*32)
if self.offset < 32:
try_next_position = False
if try_next_position:
self.offset = 0
if self._next_position:
p, self.direction, self.state, self.holding = self._next_position
if (self.x, self.y) == p:
self.moving = False
else:
self.moving = True
self.x, self.y = p
self._next_posision = None
if self.state == "throwing" and self._next_snowball:
# We need to throw a snowball!
self.current_frame = 1
data.snowballs.add(snowball.Snowball(*self._next_snowball))
self._next_snowball = None
self.moving = False
if self.snowballs == 0:
self._next_position = ((self.x, self.y), self.direction,
"getting_snowball", None)
self.counter = data.get_ticks()
else:
self.moving = False
endxy = self.x, self.y
if startxy != endxy:
self.move_sprites(startxy, endxy)
class MasterIgloo(Igloo):
def __init__(self, *pargs, **kwargs):
self.observers = set()
Igloo.__init__(self, *pargs, **kwargs)
def _set_player(self, player):
if player != self.player:
Igloo._set_player(self, player)
send(MBuildingOwner(self.building_id, player.player_id))
player = property(Igloo._get_player, _set_player)
def _set_jeopardy(self, j):
self._jeopardy = j
if hasattr(self, 'building_id'):
send(MBuildingJeopardy(self.building_id, j,
self.player_taking_over.player_id))
jeopardy = property((lambda self:self._jeopardy), _set_jeopardy)
def storage_changed(self):
Igloo.storage_changed(self)
m = MBuildingStorage(self.building_id, self.num_storage('fish'),
self.num_storage('crystal'))
send(m)
class SlaveIgloo(Igloo):
def handle_MBuildingOwner(self, m):
# This will change the player of all the units too.
if m.player_id in data.players:
p = data.players[m.player_id]
if p != self.player:
self.player = p
self.region.player = p
def handle_MBuildingJeopardy(self, m):
self.jeopardy = bool(m.jeopardy)
self.player_taking_over = data.players[m.player_taking_over_id]
self.jeopardy_count = data.get_ticks()
def handle_MBuildingStorage(self, m):
self._storage['fish'] = m.fish
self._storage['crystal'] = m.crystal
self.storage_changed()
from plugin import Emblem, DynamicMapObject, BaseDynamicMapObject
class SlaveDynamicMapObject(BaseDynamicMapObject):
def handle_MDMOHidden(self, m):
self.hidden = bool(m.hidden)
| attrs = list(self.attrs)
for arg in pargs:
n = attrs.pop(0)
setattr(self, n, arg)
for n in attrs:
setattr(self, n, kwargs.pop(n))
if kwargs:
raise TypeError("unexpected keyword argument '%s'" %
kwargs.keys()[0]) | identifier_body |
async_await_basics.rs | use futures::executor::block_on;
use std::thread::Thread;
use std::sync::mpsc;
use futures::join;
use {
std::{
pin::Pin,
task::Waker,
thread,
},
};
use {
futures::{
future::{FutureExt, BoxFuture},
task::{ArcWake, waker_ref},
},
std::{
future::Future,
sync::{Arc, Mutex},
sync::mpsc::{sync_channel, SyncSender, Receiver},
task::{Context, Poll},
time::Duration,
},
};
fn async_await_basics_main() {
println!("Hello, world!");
block_on(async_main());
let (executor, spawner) = new_executor_and_spawner();
// Spawn a task to print before and after waiting on a timer.
spawner.spawn(async {
println!("howdy!");
// Wait for our timer future to complete after two seconds.
TimerFuture::new(Duration::new(2, 0)).await;
println!("done!");
});
// Drop the spawner so that our executor knows it is finished and won't
// receive more incoming tasks to run.
drop(spawner);
// Run the executor until the task queue is empty.
// This will print "howdy!", pause, and then print "done!".
executor.run();
}
struct Song {
name: String
}
impl Song {
fn new() -> Song {
Song { name: String::from("Hotel California") }
}
}
async fn learn_song() -> Song {
println!("Learning Song!");
//std::thread::sleep(Duration::from_secs(2));
println!("Good Progress!");
//std::thread::sleep(Duration::from_secs(1));
Song::new()
}
async fn sing_song(song: Song) {
println!("Tune instruments! {}", song.name);
//std::thread::sleep(Duration::from_secs(1));
println!("Singing Song! {}", song.name);
}
async fn dance() {
println!("Dance!!")
}
async fn learn_and_sing() {
let song = learn_song().await;
sing_song(song).await;
}
async fn async_main() {
let f2 = dance();
let f1 = learn_and_sing();
futures::join!(f2, f1);
}
// Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures
// that have been submitted to an executor.
// Waker provides a wake() method that can be used to tell the executor that the associated task
// should be awoken. When wake() is called, the executor knows that the task associated with the Waker
// is ready to make progress, and its future should be polled again.
// Waker also implements clone() so that it can be copied around and stored.
pub struct | {
shared_state: Arc<Mutex<SharedState>>,
}
/// Shared state between the future and the waiting thread
struct SharedState {
/// Whether or not the sleep time has elapsed
completed: bool,
/// The waker for the task that `TimerFuture` is running on.
/// The thread can use this after setting `completed = true` to tell
/// `TimerFuture`'s task to wake up, see that `completed = true`, and
/// move forward.
waker: Option<Waker>,
}
impl Future for TimerFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Look at the shared state to see if the timer has already completed.
let mut shared_state = self.shared_state.lock().unwrap();
if shared_state.completed {
Poll::Ready(())
} else {
// Set waker so that the thread can wake up the current task
// when the timer has completed, ensuring that the future is polled
// again and sees that `completed = true`.
//
// It's tempting to do this once rather than repeatedly cloning
// the waker each time. However, the `TimerFuture` can move between
// tasks on the executor, which could cause a stale waker pointing
// to the wrong task, preventing `TimerFuture` from waking up
// correctly.
//
// N.B. it's possible to check for this using the `Waker::will_wake`
// function, but we omit that here to keep things simple.
shared_state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
impl TimerFuture {
/// Create a new `TimerFuture` which will complete after the provided
/// timeout.
pub fn new(duration: Duration) -> Self {
let shared_state = Arc::new(Mutex::new(SharedState {
completed: false,
waker: None,
}));
// Spawn the new thread
let thread_shared_state = shared_state.clone();
thread::spawn(move || {
thread::sleep(duration);
let mut shared_state = thread_shared_state.lock().unwrap();
// Signal that the timer has completed and wake up the last
// task on which the future was polled, if one exists.
shared_state.completed = true;
if let Some(waker) = shared_state.waker.take() {
waker.wake()
}
});
TimerFuture { shared_state }
}
}
/// Task executor that receives tasks off of a channel and runs them.
struct Executor {
ready_queue: Receiver<Arc<Task>>,
}
/// `Spawner` spawns new futures onto the task channel.
#[derive(Clone)]
struct Spawner {
task_sender: SyncSender<Arc<Task>>,
}
/// A future that can reschedule itself to be polled by an `Executor`.
struct Task {
/// In-progress future that should be pushed to completion.
///
/// The `Mutex` is not necessary for correctness, since we only have
/// one thread executing tasks at once. However, Rust isn't smart
/// enough to know that `future` is only mutated from one thread,
/// so we need use the `Mutex` to prove thread-safety. A production
/// executor would not need this, and could use `UnsafeCell` instead.
future: Mutex<Option<BoxFuture<'static, ()>>>,
/// Handle to place the task itself back onto the task queue.
task_sender: SyncSender<Arc<Task>>,
}
fn new_executor_and_spawner() -> (Executor, Spawner) {
// Maximum number of tasks to allow queueing in the channel at once.
// This is just to make `sync_channel` happy, and wouldn't be present in
// a real executor.
const MAX_QUEUED_TASKS: usize = 10_000;
let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS);
(Executor { ready_queue }, Spawner { task_sender })
}
impl Spawner {
fn spawn(&self, future: impl Future<Output=()> + 'static + Send) {
let future = future.boxed();
let task = Arc::new(Task {
future: Mutex::new(Some(future)),
task_sender: self.task_sender.clone(),
});
self.task_sender.send(task).expect("too many tasks queued");
}
}
// To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers
// are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers
// tell the executor exactly which task has become ready, allowing them to poll just the futures that
// are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake
// trait and then using the waker_ref or .into_waker() functions to turn an Arc<impl ArcWake> into a
// Waker.
impl ArcWake for Task {
fn wake_by_ref(arc_self: &Arc<Self>) {
// Implement `wake` by sending this task back onto the task channel
// so that it will be polled again by the executor.
let cloned = arc_self.clone();
arc_self.task_sender.send(cloned).expect("too many tasks queued");
}
}
impl Executor {
fn run(&self) {
while let Ok(task) = self.ready_queue.recv() {
// Take the future, and if it has not yet completed (is still Some),
// poll it in an attempt to complete it.
let mut future_slot = task.future.lock().unwrap();
if let Some(mut future) = future_slot.take() {
// Create a `LocalWaker` from the task itself
let waker = waker_ref(&task);
let context = &mut Context::from_waker(&*waker);
// `BoxFuture<T>` is a type alias for
// `Pin<Box<dyn Future<Output = T> + Send + 'static>>`.
// We can get a `Pin<&mut dyn Future + Send + 'static>`
// from it by calling the `Pin::as_mut` method.
if let Poll::Pending = future.as_mut().poll(context) {
// We're not done processing the future, so put it
// back in its task to be run again in the future.
*future_slot = Some(future);
}
}
}
}
}
// In practice, this problem is solved through integration with an IO-aware system blocking primitive,
// such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all
// of which are exposed through the cross-platform Rust crate mio). These primitives all allow a
// thread to block on multiple asynchronous IO events, returning once one of the events completes.
// In practice, these APIs usually look something like this:
/*struct IoBlocker {
/* ... */
}
struct Event {
// An ID uniquely identifying the event that occurred and was listened for.
id: usize,
// A set of signals to wait for, or which occurred.
signals: Signals,
}
impl IoBlocker {
/// Create a new collection of asynchronous IO events to block on.
fn new() -> Self { /* ... */ }
/// Express an interest in a particular IO event.
fn add_io_event_interest(
&self,
/// The object on which the event will occur
io_object: &IoObject,
/// A set of signals that may appear on the `io_object` for
/// which an event should be triggered, paired with
/// an ID to give to events that result from this interest.
event: Event,
) { /* ... */ }
/// Block until one of the events occurs.
fn block(&self) -> Event { /* ... */ }
}
let mut io_blocker = IoBlocker::new();
io_blocker.add_io_event_interest(
&socket_1,
Event { id: 1, signals: READABLE },
);
io_blocker.add_io_event_interest(
&socket_2,
Event { id: 2, signals: READABLE | WRITABLE },
);
let event = io_blocker.block();
// prints e.g. "Socket 1 is now READABLE" if socket one became readable.
println!("Socket {:?} is now {:?}", event.id, event.signals);
*/
/*
Futures executors can use these primitives to provide asynchronous IO objects such as sockets that
can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead
example above, the Socket::set_readable_callback function might look like the following pseudocode:
impl Socket {
fn set_readable_callback(&self, waker: Waker) {
// `local_executor` is a reference to the local executor.
// this could be provided at creation of the socket, but in practice
// many executor implementations pass it down through thread local
// storage for convenience.
let local_executor = self.local_executor;
// Unique ID for this IO object.
let id = self.id;
// Store the local waker in the executor's map so that it can be called
// once the IO event arrives.
local_executor.event_map.insert(id, waker);
local_executor.add_io_event_interest(
&self.socket_file_descriptor,
Event { id, signals: READABLE },
);
}
}
We can now have just one executor thread which can receive and dispatch any IO event to the appropriate
Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion
before returning to check for more IO events (and the cycle continues...).
*/
// Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an .await, as
// it can cause the threadpool to lock up: one task could take out a lock, .await and yield to the
// executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this,
// use the Mutex in futures::lock rather than the one from std::sync. | TimerFuture | identifier_name |
async_await_basics.rs | use futures::executor::block_on;
use std::thread::Thread;
use std::sync::mpsc;
use futures::join;
use {
std::{
pin::Pin,
task::Waker,
thread,
},
};
use {
futures::{
future::{FutureExt, BoxFuture},
task::{ArcWake, waker_ref},
},
std::{
future::Future,
sync::{Arc, Mutex},
sync::mpsc::{sync_channel, SyncSender, Receiver},
task::{Context, Poll},
time::Duration,
},
};
fn async_await_basics_main() {
println!("Hello, world!");
block_on(async_main());
let (executor, spawner) = new_executor_and_spawner();
// Spawn a task to print before and after waiting on a timer.
spawner.spawn(async {
println!("howdy!");
// Wait for our timer future to complete after two seconds.
TimerFuture::new(Duration::new(2, 0)).await;
println!("done!");
});
// Drop the spawner so that our executor knows it is finished and won't
// receive more incoming tasks to run.
drop(spawner);
// Run the executor until the task queue is empty.
// This will print "howdy!", pause, and then print "done!".
executor.run();
}
struct Song {
name: String
}
impl Song {
fn new() -> Song {
Song { name: String::from("Hotel California") }
}
}
async fn learn_song() -> Song {
println!("Learning Song!");
//std::thread::sleep(Duration::from_secs(2));
println!("Good Progress!");
//std::thread::sleep(Duration::from_secs(1));
Song::new()
}
async fn sing_song(song: Song) {
println!("Tune instruments! {}", song.name);
//std::thread::sleep(Duration::from_secs(1));
println!("Singing Song! {}", song.name);
}
async fn dance() {
println!("Dance!!")
}
async fn learn_and_sing() {
let song = learn_song().await;
sing_song(song).await;
}
async fn async_main() {
let f2 = dance();
let f1 = learn_and_sing();
futures::join!(f2, f1);
}
// Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures
// that have been submitted to an executor.
// Waker provides a wake() method that can be used to tell the executor that the associated task
// should be awoken. When wake() is called, the executor knows that the task associated with the Waker
// is ready to make progress, and its future should be polled again.
// Waker also implements clone() so that it can be copied around and stored.
pub struct TimerFuture {
shared_state: Arc<Mutex<SharedState>>,
}
/// Shared state between the future and the waiting thread
struct SharedState {
/// Whether or not the sleep time has elapsed
completed: bool,
/// The waker for the task that `TimerFuture` is running on.
/// The thread can use this after setting `completed = true` to tell
/// `TimerFuture`'s task to wake up, see that `completed = true`, and
/// move forward.
waker: Option<Waker>,
}
impl Future for TimerFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Look at the shared state to see if the timer has already completed.
let mut shared_state = self.shared_state.lock().unwrap();
if shared_state.completed {
Poll::Ready(())
} else {
// Set waker so that the thread can wake up the current task
// when the timer has completed, ensuring that the future is polled
// again and sees that `completed = true`.
//
// It's tempting to do this once rather than repeatedly cloning
// the waker each time. However, the `TimerFuture` can move between
// tasks on the executor, which could cause a stale waker pointing
// to the wrong task, preventing `TimerFuture` from waking up
// correctly. | // function, but we omit that here to keep things simple.
shared_state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
impl TimerFuture {
/// Create a new `TimerFuture` which will complete after the provided
/// timeout.
pub fn new(duration: Duration) -> Self {
let shared_state = Arc::new(Mutex::new(SharedState {
completed: false,
waker: None,
}));
// Spawn the new thread
let thread_shared_state = shared_state.clone();
thread::spawn(move || {
thread::sleep(duration);
let mut shared_state = thread_shared_state.lock().unwrap();
// Signal that the timer has completed and wake up the last
// task on which the future was polled, if one exists.
shared_state.completed = true;
if let Some(waker) = shared_state.waker.take() {
waker.wake()
}
});
TimerFuture { shared_state }
}
}
/// Task executor that receives tasks off of a channel and runs them.
struct Executor {
ready_queue: Receiver<Arc<Task>>,
}
/// `Spawner` spawns new futures onto the task channel.
#[derive(Clone)]
struct Spawner {
task_sender: SyncSender<Arc<Task>>,
}
/// A future that can reschedule itself to be polled by an `Executor`.
struct Task {
/// In-progress future that should be pushed to completion.
///
/// The `Mutex` is not necessary for correctness, since we only have
/// one thread executing tasks at once. However, Rust isn't smart
/// enough to know that `future` is only mutated from one thread,
/// so we need use the `Mutex` to prove thread-safety. A production
/// executor would not need this, and could use `UnsafeCell` instead.
future: Mutex<Option<BoxFuture<'static, ()>>>,
/// Handle to place the task itself back onto the task queue.
task_sender: SyncSender<Arc<Task>>,
}
fn new_executor_and_spawner() -> (Executor, Spawner) {
// Maximum number of tasks to allow queueing in the channel at once.
// This is just to make `sync_channel` happy, and wouldn't be present in
// a real executor.
const MAX_QUEUED_TASKS: usize = 10_000;
let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS);
(Executor { ready_queue }, Spawner { task_sender })
}
impl Spawner {
fn spawn(&self, future: impl Future<Output=()> + 'static + Send) {
let future = future.boxed();
let task = Arc::new(Task {
future: Mutex::new(Some(future)),
task_sender: self.task_sender.clone(),
});
self.task_sender.send(task).expect("too many tasks queued");
}
}
// To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers
// are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers
// tell the executor exactly which task has become ready, allowing them to poll just the futures that
// are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake
// trait and then using the waker_ref or .into_waker() functions to turn an Arc<impl ArcWake> into a
// Waker.
impl ArcWake for Task {
fn wake_by_ref(arc_self: &Arc<Self>) {
// Implement `wake` by sending this task back onto the task channel
// so that it will be polled again by the executor.
let cloned = arc_self.clone();
arc_self.task_sender.send(cloned).expect("too many tasks queued");
}
}
impl Executor {
fn run(&self) {
while let Ok(task) = self.ready_queue.recv() {
// Take the future, and if it has not yet completed (is still Some),
// poll it in an attempt to complete it.
let mut future_slot = task.future.lock().unwrap();
if let Some(mut future) = future_slot.take() {
// Create a `LocalWaker` from the task itself
let waker = waker_ref(&task);
let context = &mut Context::from_waker(&*waker);
// `BoxFuture<T>` is a type alias for
// `Pin<Box<dyn Future<Output = T> + Send + 'static>>`.
// We can get a `Pin<&mut dyn Future + Send + 'static>`
// from it by calling the `Pin::as_mut` method.
if let Poll::Pending = future.as_mut().poll(context) {
// We're not done processing the future, so put it
// back in its task to be run again in the future.
*future_slot = Some(future);
}
}
}
}
}
// In practice, this problem is solved through integration with an IO-aware system blocking primitive,
// such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all
// of which are exposed through the cross-platform Rust crate mio). These primitives all allow a
// thread to block on multiple asynchronous IO events, returning once one of the events completes.
// In practice, these APIs usually look something like this:
/*struct IoBlocker {
/* ... */
}
struct Event {
// An ID uniquely identifying the event that occurred and was listened for.
id: usize,
// A set of signals to wait for, or which occurred.
signals: Signals,
}
impl IoBlocker {
/// Create a new collection of asynchronous IO events to block on.
fn new() -> Self { /* ... */ }
/// Express an interest in a particular IO event.
fn add_io_event_interest(
&self,
/// The object on which the event will occur
io_object: &IoObject,
/// A set of signals that may appear on the `io_object` for
/// which an event should be triggered, paired with
/// an ID to give to events that result from this interest.
event: Event,
) { /* ... */ }
/// Block until one of the events occurs.
fn block(&self) -> Event { /* ... */ }
}
let mut io_blocker = IoBlocker::new();
io_blocker.add_io_event_interest(
&socket_1,
Event { id: 1, signals: READABLE },
);
io_blocker.add_io_event_interest(
&socket_2,
Event { id: 2, signals: READABLE | WRITABLE },
);
let event = io_blocker.block();
// prints e.g. "Socket 1 is now READABLE" if socket one became readable.
println!("Socket {:?} is now {:?}", event.id, event.signals);
*/
/*
Futures executors can use these primitives to provide asynchronous IO objects such as sockets that
can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead
example above, the Socket::set_readable_callback function might look like the following pseudocode:
impl Socket {
fn set_readable_callback(&self, waker: Waker) {
// `local_executor` is a reference to the local executor.
// this could be provided at creation of the socket, but in practice
// many executor implementations pass it down through thread local
// storage for convenience.
let local_executor = self.local_executor;
// Unique ID for this IO object.
let id = self.id;
// Store the local waker in the executor's map so that it can be called
// once the IO event arrives.
local_executor.event_map.insert(id, waker);
local_executor.add_io_event_interest(
&self.socket_file_descriptor,
Event { id, signals: READABLE },
);
}
}
We can now have just one executor thread which can receive and dispatch any IO event to the appropriate
Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion
before returning to check for more IO events (and the cycle continues...).
*/
// Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an .await, as
// it can cause the threadpool to lock up: one task could take out a lock, .await and yield to the
// executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this,
// use the Mutex in futures::lock rather than the one from std::sync. | //
// N.B. it's possible to check for this using the `Waker::will_wake` | random_line_split |
async_await_basics.rs | use futures::executor::block_on;
use std::thread::Thread;
use std::sync::mpsc;
use futures::join;
use {
std::{
pin::Pin,
task::Waker,
thread,
},
};
use {
futures::{
future::{FutureExt, BoxFuture},
task::{ArcWake, waker_ref},
},
std::{
future::Future,
sync::{Arc, Mutex},
sync::mpsc::{sync_channel, SyncSender, Receiver},
task::{Context, Poll},
time::Duration,
},
};
fn async_await_basics_main() {
println!("Hello, world!");
block_on(async_main());
let (executor, spawner) = new_executor_and_spawner();
// Spawn a task to print before and after waiting on a timer.
spawner.spawn(async {
println!("howdy!");
// Wait for our timer future to complete after two seconds.
TimerFuture::new(Duration::new(2, 0)).await;
println!("done!");
});
// Drop the spawner so that our executor knows it is finished and won't
// receive more incoming tasks to run.
drop(spawner);
// Run the executor until the task queue is empty.
// This will print "howdy!", pause, and then print "done!".
executor.run();
}
struct Song {
name: String
}
impl Song {
fn new() -> Song {
Song { name: String::from("Hotel California") }
}
}
async fn learn_song() -> Song {
println!("Learning Song!");
//std::thread::sleep(Duration::from_secs(2));
println!("Good Progress!");
//std::thread::sleep(Duration::from_secs(1));
Song::new()
}
async fn sing_song(song: Song) {
println!("Tune instruments! {}", song.name);
//std::thread::sleep(Duration::from_secs(1));
println!("Singing Song! {}", song.name);
}
async fn dance() |
async fn learn_and_sing() {
let song = learn_song().await;
sing_song(song).await;
}
async fn async_main() {
let f2 = dance();
let f1 = learn_and_sing();
futures::join!(f2, f1);
}
// Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures
// that have been submitted to an executor.
// Waker provides a wake() method that can be used to tell the executor that the associated task
// should be awoken. When wake() is called, the executor knows that the task associated with the Waker
// is ready to make progress, and its future should be polled again.
// Waker also implements clone() so that it can be copied around and stored.
pub struct TimerFuture {
shared_state: Arc<Mutex<SharedState>>,
}
/// Shared state between the future and the waiting thread
struct SharedState {
/// Whether or not the sleep time has elapsed
completed: bool,
/// The waker for the task that `TimerFuture` is running on.
/// The thread can use this after setting `completed = true` to tell
/// `TimerFuture`'s task to wake up, see that `completed = true`, and
/// move forward.
waker: Option<Waker>,
}
impl Future for TimerFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Look at the shared state to see if the timer has already completed.
let mut shared_state = self.shared_state.lock().unwrap();
if shared_state.completed {
Poll::Ready(())
} else {
// Set waker so that the thread can wake up the current task
// when the timer has completed, ensuring that the future is polled
// again and sees that `completed = true`.
//
// It's tempting to do this once rather than repeatedly cloning
// the waker each time. However, the `TimerFuture` can move between
// tasks on the executor, which could cause a stale waker pointing
// to the wrong task, preventing `TimerFuture` from waking up
// correctly.
//
// N.B. it's possible to check for this using the `Waker::will_wake`
// function, but we omit that here to keep things simple.
shared_state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
impl TimerFuture {
/// Create a new `TimerFuture` which will complete after the provided
/// timeout.
pub fn new(duration: Duration) -> Self {
let shared_state = Arc::new(Mutex::new(SharedState {
completed: false,
waker: None,
}));
// Spawn the new thread
let thread_shared_state = shared_state.clone();
thread::spawn(move || {
thread::sleep(duration);
let mut shared_state = thread_shared_state.lock().unwrap();
// Signal that the timer has completed and wake up the last
// task on which the future was polled, if one exists.
shared_state.completed = true;
if let Some(waker) = shared_state.waker.take() {
waker.wake()
}
});
TimerFuture { shared_state }
}
}
/// Task executor that receives tasks off of a channel and runs them.
struct Executor {
ready_queue: Receiver<Arc<Task>>,
}
/// `Spawner` spawns new futures onto the task channel.
#[derive(Clone)]
struct Spawner {
task_sender: SyncSender<Arc<Task>>,
}
/// A future that can reschedule itself to be polled by an `Executor`.
struct Task {
/// In-progress future that should be pushed to completion.
///
/// The `Mutex` is not necessary for correctness, since we only have
/// one thread executing tasks at once. However, Rust isn't smart
/// enough to know that `future` is only mutated from one thread,
/// so we need use the `Mutex` to prove thread-safety. A production
/// executor would not need this, and could use `UnsafeCell` instead.
future: Mutex<Option<BoxFuture<'static, ()>>>,
/// Handle to place the task itself back onto the task queue.
task_sender: SyncSender<Arc<Task>>,
}
fn new_executor_and_spawner() -> (Executor, Spawner) {
// Maximum number of tasks to allow queueing in the channel at once.
// This is just to make `sync_channel` happy, and wouldn't be present in
// a real executor.
const MAX_QUEUED_TASKS: usize = 10_000;
let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS);
(Executor { ready_queue }, Spawner { task_sender })
}
impl Spawner {
fn spawn(&self, future: impl Future<Output=()> + 'static + Send) {
let future = future.boxed();
let task = Arc::new(Task {
future: Mutex::new(Some(future)),
task_sender: self.task_sender.clone(),
});
self.task_sender.send(task).expect("too many tasks queued");
}
}
// To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers
// are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers
// tell the executor exactly which task has become ready, allowing them to poll just the futures that
// are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake
// trait and then using the waker_ref or .into_waker() functions to turn an Arc<impl ArcWake> into a
// Waker.
impl ArcWake for Task {
fn wake_by_ref(arc_self: &Arc<Self>) {
// Implement `wake` by sending this task back onto the task channel
// so that it will be polled again by the executor.
let cloned = arc_self.clone();
arc_self.task_sender.send(cloned).expect("too many tasks queued");
}
}
impl Executor {
fn run(&self) {
while let Ok(task) = self.ready_queue.recv() {
// Take the future, and if it has not yet completed (is still Some),
// poll it in an attempt to complete it.
let mut future_slot = task.future.lock().unwrap();
if let Some(mut future) = future_slot.take() {
// Create a `LocalWaker` from the task itself
let waker = waker_ref(&task);
let context = &mut Context::from_waker(&*waker);
// `BoxFuture<T>` is a type alias for
// `Pin<Box<dyn Future<Output = T> + Send + 'static>>`.
// We can get a `Pin<&mut dyn Future + Send + 'static>`
// from it by calling the `Pin::as_mut` method.
if let Poll::Pending = future.as_mut().poll(context) {
// We're not done processing the future, so put it
// back in its task to be run again in the future.
*future_slot = Some(future);
}
}
}
}
}
// In practice, this problem is solved through integration with an IO-aware system blocking primitive,
// such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all
// of which are exposed through the cross-platform Rust crate mio). These primitives all allow a
// thread to block on multiple asynchronous IO events, returning once one of the events completes.
// In practice, these APIs usually look something like this:
/*struct IoBlocker {
/* ... */
}
struct Event {
// An ID uniquely identifying the event that occurred and was listened for.
id: usize,
// A set of signals to wait for, or which occurred.
signals: Signals,
}
impl IoBlocker {
/// Create a new collection of asynchronous IO events to block on.
fn new() -> Self { /* ... */ }
/// Express an interest in a particular IO event.
fn add_io_event_interest(
&self,
/// The object on which the event will occur
io_object: &IoObject,
/// A set of signals that may appear on the `io_object` for
/// which an event should be triggered, paired with
/// an ID to give to events that result from this interest.
event: Event,
) { /* ... */ }
/// Block until one of the events occurs.
fn block(&self) -> Event { /* ... */ }
}
let mut io_blocker = IoBlocker::new();
io_blocker.add_io_event_interest(
&socket_1,
Event { id: 1, signals: READABLE },
);
io_blocker.add_io_event_interest(
&socket_2,
Event { id: 2, signals: READABLE | WRITABLE },
);
let event = io_blocker.block();
// prints e.g. "Socket 1 is now READABLE" if socket one became readable.
println!("Socket {:?} is now {:?}", event.id, event.signals);
*/
/*
Futures executors can use these primitives to provide asynchronous IO objects such as sockets that
can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead
example above, the Socket::set_readable_callback function might look like the following pseudocode:
impl Socket {
fn set_readable_callback(&self, waker: Waker) {
// `local_executor` is a reference to the local executor.
// this could be provided at creation of the socket, but in practice
// many executor implementations pass it down through thread local
// storage for convenience.
let local_executor = self.local_executor;
// Unique ID for this IO object.
let id = self.id;
// Store the local waker in the executor's map so that it can be called
// once the IO event arrives.
local_executor.event_map.insert(id, waker);
local_executor.add_io_event_interest(
&self.socket_file_descriptor,
Event { id, signals: READABLE },
);
}
}
We can now have just one executor thread which can receive and dispatch any IO event to the appropriate
Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion
before returning to check for more IO events (and the cycle continues...).
*/
// Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an .await, as
// it can cause the threadpool to lock up: one task could take out a lock, .await and yield to the
// executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this,
// use the Mutex in futures::lock rather than the one from std::sync. | {
println!("Dance!!")
} | identifier_body |
async_await_basics.rs | use futures::executor::block_on;
use std::thread::Thread;
use std::sync::mpsc;
use futures::join;
use {
std::{
pin::Pin,
task::Waker,
thread,
},
};
use {
futures::{
future::{FutureExt, BoxFuture},
task::{ArcWake, waker_ref},
},
std::{
future::Future,
sync::{Arc, Mutex},
sync::mpsc::{sync_channel, SyncSender, Receiver},
task::{Context, Poll},
time::Duration,
},
};
fn async_await_basics_main() {
println!("Hello, world!");
block_on(async_main());
let (executor, spawner) = new_executor_and_spawner();
// Spawn a task to print before and after waiting on a timer.
spawner.spawn(async {
println!("howdy!");
// Wait for our timer future to complete after two seconds.
TimerFuture::new(Duration::new(2, 0)).await;
println!("done!");
});
// Drop the spawner so that our executor knows it is finished and won't
// receive more incoming tasks to run.
drop(spawner);
// Run the executor until the task queue is empty.
// This will print "howdy!", pause, and then print "done!".
executor.run();
}
struct Song {
name: String
}
impl Song {
fn new() -> Song {
Song { name: String::from("Hotel California") }
}
}
async fn learn_song() -> Song {
println!("Learning Song!");
//std::thread::sleep(Duration::from_secs(2));
println!("Good Progress!");
//std::thread::sleep(Duration::from_secs(1));
Song::new()
}
async fn sing_song(song: Song) {
println!("Tune instruments! {}", song.name);
//std::thread::sleep(Duration::from_secs(1));
println!("Singing Song! {}", song.name);
}
async fn dance() {
println!("Dance!!")
}
async fn learn_and_sing() {
let song = learn_song().await;
sing_song(song).await;
}
async fn async_main() {
let f2 = dance();
let f1 = learn_and_sing();
futures::join!(f2, f1);
}
// Each time a future is polled, it is polled as part of a "task". Tasks are the top-level futures
// that have been submitted to an executor.
// Waker provides a wake() method that can be used to tell the executor that the associated task
// should be awoken. When wake() is called, the executor knows that the task associated with the Waker
// is ready to make progress, and its future should be polled again.
// Waker also implements clone() so that it can be copied around and stored.
pub struct TimerFuture {
shared_state: Arc<Mutex<SharedState>>,
}
/// Shared state between the future and the waiting thread
struct SharedState {
/// Whether or not the sleep time has elapsed
completed: bool,
/// The waker for the task that `TimerFuture` is running on.
/// The thread can use this after setting `completed = true` to tell
/// `TimerFuture`'s task to wake up, see that `completed = true`, and
/// move forward.
waker: Option<Waker>,
}
impl Future for TimerFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// Look at the shared state to see if the timer has already completed.
let mut shared_state = self.shared_state.lock().unwrap();
if shared_state.completed {
Poll::Ready(())
} else {
// Set waker so that the thread can wake up the current task
// when the timer has completed, ensuring that the future is polled
// again and sees that `completed = true`.
//
// It's tempting to do this once rather than repeatedly cloning
// the waker each time. However, the `TimerFuture` can move between
// tasks on the executor, which could cause a stale waker pointing
// to the wrong task, preventing `TimerFuture` from waking up
// correctly.
//
// N.B. it's possible to check for this using the `Waker::will_wake`
// function, but we omit that here to keep things simple.
shared_state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
impl TimerFuture {
/// Create a new `TimerFuture` which will complete after the provided
/// timeout.
pub fn new(duration: Duration) -> Self {
let shared_state = Arc::new(Mutex::new(SharedState {
completed: false,
waker: None,
}));
// Spawn the new thread
let thread_shared_state = shared_state.clone();
thread::spawn(move || {
thread::sleep(duration);
let mut shared_state = thread_shared_state.lock().unwrap();
// Signal that the timer has completed and wake up the last
// task on which the future was polled, if one exists.
shared_state.completed = true;
if let Some(waker) = shared_state.waker.take() {
waker.wake()
}
});
TimerFuture { shared_state }
}
}
/// Task executor that receives tasks off of a channel and runs them.
struct Executor {
ready_queue: Receiver<Arc<Task>>,
}
/// `Spawner` spawns new futures onto the task channel.
#[derive(Clone)]
struct Spawner {
task_sender: SyncSender<Arc<Task>>,
}
/// A future that can reschedule itself to be polled by an `Executor`.
struct Task {
/// In-progress future that should be pushed to completion.
///
/// The `Mutex` is not necessary for correctness, since we only have
/// one thread executing tasks at once. However, Rust isn't smart
/// enough to know that `future` is only mutated from one thread,
/// so we need use the `Mutex` to prove thread-safety. A production
/// executor would not need this, and could use `UnsafeCell` instead.
future: Mutex<Option<BoxFuture<'static, ()>>>,
/// Handle to place the task itself back onto the task queue.
task_sender: SyncSender<Arc<Task>>,
}
fn new_executor_and_spawner() -> (Executor, Spawner) {
// Maximum number of tasks to allow queueing in the channel at once.
// This is just to make `sync_channel` happy, and wouldn't be present in
// a real executor.
const MAX_QUEUED_TASKS: usize = 10_000;
let (task_sender, ready_queue) = sync_channel(MAX_QUEUED_TASKS);
(Executor { ready_queue }, Spawner { task_sender })
}
impl Spawner {
fn spawn(&self, future: impl Future<Output=()> + 'static + Send) {
let future = future.boxed();
let task = Arc::new(Task {
future: Mutex::new(Some(future)),
task_sender: self.task_sender.clone(),
});
self.task_sender.send(task).expect("too many tasks queued");
}
}
// To poll futures, we'll need to create a Waker. As discussed in the task wakeups section, Wakers
// are responsible for scheduling a task to be polled again once wake is called. Remember that Wakers
// tell the executor exactly which task has become ready, allowing them to poll just the futures that
// are ready to make progress. The easiest way to create a new Waker is by implementing the ArcWake
// trait and then using the waker_ref or .into_waker() functions to turn an Arc<impl ArcWake> into a
// Waker.
impl ArcWake for Task {
fn wake_by_ref(arc_self: &Arc<Self>) {
// Implement `wake` by sending this task back onto the task channel
// so that it will be polled again by the executor.
let cloned = arc_self.clone();
arc_self.task_sender.send(cloned).expect("too many tasks queued");
}
}
impl Executor {
fn run(&self) {
while let Ok(task) = self.ready_queue.recv() {
// Take the future, and if it has not yet completed (is still Some),
// poll it in an attempt to complete it.
let mut future_slot = task.future.lock().unwrap();
if let Some(mut future) = future_slot.take() {
// Create a `LocalWaker` from the task itself
let waker = waker_ref(&task);
let context = &mut Context::from_waker(&*waker);
// `BoxFuture<T>` is a type alias for
// `Pin<Box<dyn Future<Output = T> + Send + 'static>>`.
// We can get a `Pin<&mut dyn Future + Send + 'static>`
// from it by calling the `Pin::as_mut` method.
if let Poll::Pending = future.as_mut().poll(context) |
}
}
}
}
// In practice, this problem is solved through integration with an IO-aware system blocking primitive,
// such as epoll on Linux, kqueue on FreeBSD and Mac OS, IOCP on Windows, and ports on Fuchsia (all
// of which are exposed through the cross-platform Rust crate mio). These primitives all allow a
// thread to block on multiple asynchronous IO events, returning once one of the events completes.
// In practice, these APIs usually look something like this:
/*struct IoBlocker {
/* ... */
}
struct Event {
// An ID uniquely identifying the event that occurred and was listened for.
id: usize,
// A set of signals to wait for, or which occurred.
signals: Signals,
}
impl IoBlocker {
/// Create a new collection of asynchronous IO events to block on.
fn new() -> Self { /* ... */ }
/// Express an interest in a particular IO event.
fn add_io_event_interest(
&self,
/// The object on which the event will occur
io_object: &IoObject,
/// A set of signals that may appear on the `io_object` for
/// which an event should be triggered, paired with
/// an ID to give to events that result from this interest.
event: Event,
) { /* ... */ }
/// Block until one of the events occurs.
fn block(&self) -> Event { /* ... */ }
}
let mut io_blocker = IoBlocker::new();
io_blocker.add_io_event_interest(
&socket_1,
Event { id: 1, signals: READABLE },
);
io_blocker.add_io_event_interest(
&socket_2,
Event { id: 2, signals: READABLE | WRITABLE },
);
let event = io_blocker.block();
// prints e.g. "Socket 1 is now READABLE" if socket one became readable.
println!("Socket {:?} is now {:?}", event.id, event.signals);
*/
/*
Futures executors can use these primitives to provide asynchronous IO objects such as sockets that
can configure callbacks to be run when a particular IO event occurs. In the case of our SocketRead
example above, the Socket::set_readable_callback function might look like the following pseudocode:
impl Socket {
fn set_readable_callback(&self, waker: Waker) {
// `local_executor` is a reference to the local executor.
// this could be provided at creation of the socket, but in practice
// many executor implementations pass it down through thread local
// storage for convenience.
let local_executor = self.local_executor;
// Unique ID for this IO object.
let id = self.id;
// Store the local waker in the executor's map so that it can be called
// once the IO event arrives.
local_executor.event_map.insert(id, waker);
local_executor.add_io_event_interest(
&self.socket_file_descriptor,
Event { id, signals: READABLE },
);
}
}
We can now have just one executor thread which can receive and dispatch any IO event to the appropriate
Waker, which will wake up the corresponding task, allowing the executor to drive more tasks to completion
before returning to check for more IO events (and the cycle continues...).
*/
// Similarly, it isn't a good idea to hold a traditional non-futures-aware lock across an .await, as
// it can cause the threadpool to lock up: one task could take out a lock, .await and yield to the
// executor, allowing another task to attempt to take the lock and cause a deadlock. To avoid this,
// use the Mutex in futures::lock rather than the one from std::sync. | {
// We're not done processing the future, so put it
// back in its task to be run again in the future.
*future_slot = Some(future);
} | conditional_block |
controller.py | """
Venter på innkommende meldinger som enten gir oppgaver å jobbe med, eller etterspør et rom å søke gjennom
Tar inn "Kode" som skal knekkes, samt rom det skal søkes i, (muligens størrelse på hver enkelt arbeidoppgave?) og eventuell hashing-algoritme.
Fordeler arbeidoppgaver ved å motta en forespørsel, og sender ut "Kode", tegn, arbeidsnodens søkerom, samt hashing-algoritme.
Tar inn resultater (enten svar, eller beskjed om at koden ikke er i søkerommet den ble gitt), og slutter å dele ut oppgaver basert på den koden om den er funnet (returnerer svar til den som sendte dette inn).
All kommunikasjon skal foregå via JSON api-kall
"""
#Webserver
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
#Other needed packages
import json
from random import random
from math import floor
#Required variables
tasks = []
#Settings
port = 80
address = 'localhost'
def main():
#def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
tasks.append(Task('a','dddd','dddc','abcde',10, 'none', gen_task_id(tasks)))
tasks.append(Task('0','5555','300','012345',10, 'none', gen_task_id(tasks)))
#Starting webServer
print('Starting Webserver on {}:{}'.format(address, port))
httpd = HTTPServer((address, port), webServer)
httpd.serve_forever()
def get_next_job():
for task in tasks:
job = task.get_task_worker()
if not job['finished']:
if job['free_block']:
return job
else:
task.set_finished()
return {
'finished':True,
'free_block':False,
'keyword':'',
'chars':'',
'algorithm':'',
'start_point':'',
'end_point':''
}
def get_next_job_json():
return json.dumps(get_next_job())
def gen_task_id(tasks):
task_id_unique = False
temp_id = floor(random()*10000)
while not task_id_unique:
task_id_unique = True
temp_id = floor(random()*10000)
for task in tasks:
if task.id == temp_id:
task_id_unique = False
return temp_id
class Task:
def __init__(self, | int, end_point, keyword, chars, searchwidth, algorithm, id):
self.start_point = start_point # Where to start searching
self.end_point = end_point # Where to end the search
self.keyword = keyword # What you're searching for
self.chars = chars # Characters that can be used
self.searchwidth = searchwidth # How large blocks will each worker-node be given at a time
self.algorithm = algorithm # How is keyword hashed/encrypted
self.id = id #ID to connect results to tasks
self.current_point = self.start_point # Will be increased as workers are given blocks to search through
self.finished = False
self.keyword_found = ""
def get_task(self):
return {
'id':self.id,
'finished':self.finished,
'algorithm':self.algorithm,
'start_point':self.start_point,
'end_point':self.end_point,
'keyword':self.keyword,
'chars':self.chars,
'searchwidth':self.searchwidth,
'current_point':self.current_point,
'keyword_found':self.keyword_found
}
def get_task_worker(self):
"""
Retrieves all info needed for worker and puts it into a dictionary
"""
start, end = self.get_block()
return {
'task_id':self.id,
'finished':self.finished,
'free_block':(start != end),
'keyword':self.keyword,
'chars':self.chars,
'algorithm':self.algorithm,
'start_point':start,
'end_point':end
}
def get_block(self):
"""
Retrives the next block to be worked on and updates current_point
"""
current_value = self.get_value(self.current_point)
endpoint_value = self.get_value(self.end_point)
worker_start = self.current_point
if (endpoint_value - current_value) > self.searchwidth:
worker_end = self.get_word_from_value(current_value + self.searchwidth - 1)
self.current_point = self.get_word_from_value(current_value + self.searchwidth)
else:
worker_end = self.end_point
self.current_point = worker_end
return worker_start, worker_end
def get_value(self, str):
"""
Beregner verdien fra base x-tall til base 10-tall
Bruker variablene:
str = ordet som det skal beregnes verdi for
self.chars = alle tegnene som brukes (f.eks. "0123456789")
"""
base = len(self.chars)
base_placement = len(str) - 1
value = 0
for symbol in str:
valueChar = self.chars.find(symbol)
value += valueChar * (base ** base_placement)
base_placement -= 1
return value
def get_word_from_value(self, value):
"""
Beregner base-x ordet gitt base-10 verdien, og returnerer denne
Variabler
value = base-10 verdien av kodeordet
str = verdiene kodeordet skal beregnes fra
self.chars = alle tegnene som brukes (f.eks. "abcdefghij")
"""
base = len(self.chars)
str = ""
while value != 0:
remainder = value % base
value = int((value - remainder) / base)
str = self.chars[remainder] + str
return str
def set_finished(self):
self.finished = True
class webServer(BaseHTTPRequestHandler):
def do_GET(self):
"""
Handles standard GET-requests and get_job-requests
"""
path = self.path
status_code, res = webServer.handle_get_msg(path)
self.send_response(status_code)
self.end_headers()
self.wfile.write(res.encode())
def handle_get_msg(path):
if path == "/get_job":
#return 200, get_next_job_json()
job = {'job':get_next_job_json()}
return 200, webServer.add_json_successfull_status(job)
elif path == "/get_tasks":
res = []
for task in tasks:
res.append(task.get_task())
return 200, json.dumps(res)
else:
return 404, 'Hello, world! \n{}'.format(path)
def do_POST(self):
"""
Handles all POST-requests, message is decoded in handle_post_msg()
"""
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
response = BytesIO()
try:
res = webServer.handle_post_msg(body)
print(res)
self.send_response(200)
except Exception as e:
print(e)
res = str(e)
self.send_response(500)
self.end_headers()
response.write(res.encode())
self.wfile.write(response.getvalue())
def handle_post_msg(body):
"""
Takes in a request, converts to dictionary and handles request according to type specified in request
Required
type
subtype
Types/subtypes:
get
job
tasks
post
result
create
task
test
post
"""
request = json.loads(body)
type = request['type']
subtype = request['subtype']
if type == 'get':
if subtype == 'job':
job = {'job':get_next_job()}
return webServer.add_json_successfull_status(job)
elif subtype == 'tasks':
res = []
for task in tasks:
res.append(task.get_task())
res = {'tasks':res}
return webServer.add_json_successfull_status(res)
elif type == 'post':
if subtype == 'result':
start_point = request['start_point']
end_point = request['end_point']
found_keyword_bool = request['found_keyword_bool']
keyword_found = request['keyword_found']
task_id = request['task_id']
if found_keyword_bool:
for task in tasks:
if task.id == task_id:
task.set_finished()
task.keyword_found = keyword_found
return webServer.make_json_status(1, "Result delivered successfully")
return webServer.make_json_status(0, "Couldn't find task")
elif type == 'create':
if subtype == 'task':
start_point = request['start_point']
end_point = request['end_point']
keyword = request['keyword']
chars = request['chars']
searchwidth = request['searchwidth']
algorithm = request['algorithm']
tasks.append(Task(start_point,end_point,keyword,chars,searchwidth,algorithm,gen_task_id(tasks)))
return webServer.make_json_status(1, "Successfully created new task")
elif type == 'test':
if subtype == 'post':
return webServer.make_json_status(1, "Successful")
#If correct type cannot be found
return webServer.make_json_status(0, "Task Failed")
def make_json_status(status_code, message):
return json.dumps({"status_code":status_code, "status":message})
def add_json_successfull_status(res):
res['status_code'] = 1
res['status'] = 'Successful'
res = json.dumps(res)
return res
if __name__=='__main__':
main()
| start_po | identifier_name |
controller.py | """
Venter på innkommende meldinger som enten gir oppgaver å jobbe med, eller etterspør et rom å søke gjennom
Tar inn "Kode" som skal knekkes, samt rom det skal søkes i, (muligens størrelse på hver enkelt arbeidoppgave?) og eventuell hashing-algoritme.
Fordeler arbeidoppgaver ved å motta en forespørsel, og sender ut "Kode", tegn, arbeidsnodens søkerom, samt hashing-algoritme.
Tar inn resultater (enten svar, eller beskjed om at koden ikke er i søkerommet den ble gitt), og slutter å dele ut oppgaver basert på den koden om den er funnet (returnerer svar til den som sendte dette inn).
All kommunikasjon skal foregå via JSON api-kall
"""
#Webserver
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
#Other needed packages
import json
from random import random
from math import floor
#Required variables
tasks = []
#Settings
port = 80
address = 'localhost'
def main():
#def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
tasks.append(Task('a','dddd','dddc','abcde',10, 'none', gen_task_id(tasks)))
tasks.append(Task('0','5555','300','012345',10, 'none', gen_task_id(tasks)))
#Starting webServer
print('Starting Webserver on {}:{}'.format(address, port))
httpd = HTTPServer((address, port), webServer)
httpd.serve_forever()
def get_next_job():
for task in tasks:
job = task.get_task_worker()
if not job['finished']:
if job['free_block']:
return job
else:
task.set_finished()
return {
'finished':True,
'free_block':False,
'keyword':'',
'chars':'',
'algorithm':'',
'start_point':'',
'end_point':''
}
def get_next_job_json():
return json.dumps(get_next_job())
def gen_task_id(tasks):
task_id_unique = False
temp_id = floor(random()*10000)
while not task_id_unique:
task_id_unique = True
temp_id = floor(random()*10000)
for task in tasks:
if task.id == temp_id:
task_id_unique = False
return temp_id
class Task:
def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
self.start_point = start_point # Where to start searching
self.end_point = end_point # Where to end the search
self.keyword = keyword # What you're searching for
self.chars = chars # Characters that can be used
self.searchwidth = searchwidth # How large blocks will each worker-node be given at a time
self.algorithm = algorithm # How is keyword hashed/encrypted
self.id = id #ID to connect results to tasks
self.current_point = self.start_point # Will be increased as workers are given blocks to search through
self.finished = False
self.keyword_found = ""
def get_task(self):
return {
'id':self.id,
'finished':self.finished,
'algorithm':self.algorithm,
'start_point':self.start_point,
'end_point':self.end_point,
'keyword':self.keyword,
'chars':self.chars,
'searchwidth':self.searchwidth,
'current_point':self.current_point,
'keyword_found':self.keyword_found
}
def get_task_worker(self):
"""
Retrieves all info needed for worker and puts it into a dictionary
"""
start, end = self.get_block()
return {
'task_id':self.id,
'finished':self.finished,
'free_block':(start != end),
'keyword':self.keyword,
'chars':self.chars,
'algorithm':self.algorithm,
'start_point':start,
'end_point':end
}
def get_block(self):
"""
Retrives the next block to be worked on and updates current_point
"""
current_value = self.get_value(self.current_point)
endpoint_value = self.get_value(self.end_point)
worker_start = self.current_point
if (endpoint_value - current_value) > self.searchwidth:
worker_end = self.get_word_from_value(current_value + self.searchwidth - 1)
self.current_point = self.get_word_from_value(current_value + self.searchwidth)
else:
worker_end = self.end_point
self.current_point = worker_end
return worker_start, worker_end
def get_value(self, str):
"""
Beregner verdien fra base x-tall til base 10-tall
Bruker variablene:
str = ordet som det skal beregnes verdi for
self.chars = alle tegnene som brukes (f.eks. "0123456789")
"""
base = len(self.chars)
base_placement = len(str) - 1
value = 0
for symbol in str:
valueChar = self.chars.find(symbol)
value += valueChar * (base ** base_placement)
base_placement -= 1
return value
def get_word_from_value(self, value):
"""
Beregner base-x ordet gitt base-10 verdien, og returnerer denne
Variabler
value = base-10 verdien av kodeordet
str = verdiene kodeordet skal beregnes fra
self.chars = alle tegnene som brukes (f.eks. "abcdefghij")
"""
base = len(self.chars)
str = ""
while value != 0:
remainder = value % base
value = int((value - remainder) / base)
str = self.chars[remainder] + str
return str
def set_finished(self):
self.finished = True
class webServer(BaseHTTPRequestHandler):
def do_GET(self):
"""
Handles standard GET-requests and get_job-requests
"""
path = self.path
status_code, res = webServer.handle_get_msg(path)
self.send_response(status_code)
self.end_headers()
self.wfile.write(res.encode())
def handle_get_msg(path):
if path == "/get_job":
#return 200, get_next_job_json()
job = {'job':get_next_job_json()}
return 200, webServer.add_json_successfull_status(job)
elif path == "/get_tasks":
res = []
for task in tasks:
res.append(task.get_task())
return 200, json.dumps(res)
else:
return 404, 'Hello, world! \n{}'.format(path)
def do_POST(self):
"""
Handles all POST-requests, message is decoded in handle_post_msg()
"""
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
response = BytesIO()
try:
res = webServer.handle_post_msg(body)
print(res)
self.send_response(200)
except Exception as e:
print(e)
res = str(e)
self.send_response(500)
self.end_headers()
response.write(res.encode())
self.wfile.write(response.getvalue())
def handle_post_msg(body):
"""
Takes in a request, converts to dictionary and handles request according to type specified in request
Required
type
subtype
Types/subtypes:
get
job
tasks
post
result
create
task
test
post
"""
request = json.loads(body)
type = request['type']
subtype = request['subtype']
if type == 'get':
if subtype == 'job':
job = {'job':get_next_job()}
return webServer.add_json_successfull_status(job)
elif subtype == 'tasks':
res = []
for task in tasks:
res.append(task.get_task())
res = {'tasks':res}
return webServer.add_json_successfull_status(res)
elif type == 'post':
if subtype == 'result':
start_point = request['start_point']
end_point = request['end_point']
found_keyword_bool = request['found_keyword_bool']
keyword_found = request['keyword_found']
task_id = request['task_id']
if found_keyword_bool:
for task in tasks:
if task.id == task_id:
task.set_finished()
task.keyword_found = keyword_found
return webServer.make_json_status(1, "Result delivered successfully")
return webServer.make_json_status(0, "Couldn't find task")
elif type == 'create':
if subtype == 'task':
start_point = request['start_point']
end_point = request['end_point']
keyword = request['keyword']
chars = request['chars']
searchwidth = request['searchwidth']
algorithm = request['algorithm']
tasks.append(Task(start_point,end_point,keyword,chars,searchwidth,algorithm,gen_task_id(tasks)))
return webServer.make_json_status(1, "Successfully created new task")
elif type == 'test':
if subtype == 'post':
return webServer.make_json_status(1, "Successful")
#If correct type cannot be found
return webServer.make_json_status(0, "Task Failed")
def make_json_status(status_code, message):
return json.dumps({"status_code":status_code, "status":message})
def add_json_successfull_status(res):
res['status_code'] = 1
res['status'] = 'Successful'
res = json.dumps(res)
return res
if __name__=='__main__':
main()
| conditional_block | ||
controller.py | """
Venter på innkommende meldinger som enten gir oppgaver å jobbe med, eller etterspør et rom å søke gjennom
Tar inn "Kode" som skal knekkes, samt rom det skal søkes i, (muligens størrelse på hver enkelt arbeidoppgave?) og eventuell hashing-algoritme.
Fordeler arbeidoppgaver ved å motta en forespørsel, og sender ut "Kode", tegn, arbeidsnodens søkerom, samt hashing-algoritme.
Tar inn resultater (enten svar, eller beskjed om at koden ikke er i søkerommet den ble gitt), og slutter å dele ut oppgaver basert på den koden om den er funnet (returnerer svar til den som sendte dette inn).
All kommunikasjon skal foregå via JSON api-kall
"""
#Webserver
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
#Other needed packages
import json
from random import random
from math import floor
#Required variables
tasks = []
#Settings
port = 80
address = 'localhost'
def main():
#def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
tasks.append(Task('a','dddd','dddc','abcde',10, 'none', gen_task_id(tasks)))
tasks.append(Task('0','5555','300','012345',10, 'none', gen_task_id(tasks)))
#Starting webServer
print('Starting Webserver on {}:{}'.format(address, port))
httpd = HTTPServer((address, port), webServer)
httpd.serve_forever()
def get_next_job():
for task in tasks:
job = task.get_task_worker()
if not job['finished']:
if job['free_block']:
return job
else:
task.set_finished()
return {
'finished':True,
'free_block':False,
'keyword':'',
'chars':'',
'algorithm':'',
'start_point':'',
'end_point':''
}
def get_next_job_json():
return json.dum | k_id(tasks):
task_id_unique = False
temp_id = floor(random()*10000)
while not task_id_unique:
task_id_unique = True
temp_id = floor(random()*10000)
for task in tasks:
if task.id == temp_id:
task_id_unique = False
return temp_id
class Task:
def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
self.start_point = start_point # Where to start searching
self.end_point = end_point # Where to end the search
self.keyword = keyword # What you're searching for
self.chars = chars # Characters that can be used
self.searchwidth = searchwidth # How large blocks will each worker-node be given at a time
self.algorithm = algorithm # How is keyword hashed/encrypted
self.id = id #ID to connect results to tasks
self.current_point = self.start_point # Will be increased as workers are given blocks to search through
self.finished = False
self.keyword_found = ""
def get_task(self):
return {
'id':self.id,
'finished':self.finished,
'algorithm':self.algorithm,
'start_point':self.start_point,
'end_point':self.end_point,
'keyword':self.keyword,
'chars':self.chars,
'searchwidth':self.searchwidth,
'current_point':self.current_point,
'keyword_found':self.keyword_found
}
def get_task_worker(self):
"""
Retrieves all info needed for worker and puts it into a dictionary
"""
start, end = self.get_block()
return {
'task_id':self.id,
'finished':self.finished,
'free_block':(start != end),
'keyword':self.keyword,
'chars':self.chars,
'algorithm':self.algorithm,
'start_point':start,
'end_point':end
}
def get_block(self):
"""
Retrives the next block to be worked on and updates current_point
"""
current_value = self.get_value(self.current_point)
endpoint_value = self.get_value(self.end_point)
worker_start = self.current_point
if (endpoint_value - current_value) > self.searchwidth:
worker_end = self.get_word_from_value(current_value + self.searchwidth - 1)
self.current_point = self.get_word_from_value(current_value + self.searchwidth)
else:
worker_end = self.end_point
self.current_point = worker_end
return worker_start, worker_end
def get_value(self, str):
"""
Beregner verdien fra base x-tall til base 10-tall
Bruker variablene:
str = ordet som det skal beregnes verdi for
self.chars = alle tegnene som brukes (f.eks. "0123456789")
"""
base = len(self.chars)
base_placement = len(str) - 1
value = 0
for symbol in str:
valueChar = self.chars.find(symbol)
value += valueChar * (base ** base_placement)
base_placement -= 1
return value
def get_word_from_value(self, value):
"""
Beregner base-x ordet gitt base-10 verdien, og returnerer denne
Variabler
value = base-10 verdien av kodeordet
str = verdiene kodeordet skal beregnes fra
self.chars = alle tegnene som brukes (f.eks. "abcdefghij")
"""
base = len(self.chars)
str = ""
while value != 0:
remainder = value % base
value = int((value - remainder) / base)
str = self.chars[remainder] + str
return str
def set_finished(self):
self.finished = True
class webServer(BaseHTTPRequestHandler):
def do_GET(self):
"""
Handles standard GET-requests and get_job-requests
"""
path = self.path
status_code, res = webServer.handle_get_msg(path)
self.send_response(status_code)
self.end_headers()
self.wfile.write(res.encode())
def handle_get_msg(path):
if path == "/get_job":
#return 200, get_next_job_json()
job = {'job':get_next_job_json()}
return 200, webServer.add_json_successfull_status(job)
elif path == "/get_tasks":
res = []
for task in tasks:
res.append(task.get_task())
return 200, json.dumps(res)
else:
return 404, 'Hello, world! \n{}'.format(path)
def do_POST(self):
"""
Handles all POST-requests, message is decoded in handle_post_msg()
"""
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
response = BytesIO()
try:
res = webServer.handle_post_msg(body)
print(res)
self.send_response(200)
except Exception as e:
print(e)
res = str(e)
self.send_response(500)
self.end_headers()
response.write(res.encode())
self.wfile.write(response.getvalue())
def handle_post_msg(body):
"""
Takes in a request, converts to dictionary and handles request according to type specified in request
Required
type
subtype
Types/subtypes:
get
job
tasks
post
result
create
task
test
post
"""
request = json.loads(body)
type = request['type']
subtype = request['subtype']
if type == 'get':
if subtype == 'job':
job = {'job':get_next_job()}
return webServer.add_json_successfull_status(job)
elif subtype == 'tasks':
res = []
for task in tasks:
res.append(task.get_task())
res = {'tasks':res}
return webServer.add_json_successfull_status(res)
elif type == 'post':
if subtype == 'result':
start_point = request['start_point']
end_point = request['end_point']
found_keyword_bool = request['found_keyword_bool']
keyword_found = request['keyword_found']
task_id = request['task_id']
if found_keyword_bool:
for task in tasks:
if task.id == task_id:
task.set_finished()
task.keyword_found = keyword_found
return webServer.make_json_status(1, "Result delivered successfully")
return webServer.make_json_status(0, "Couldn't find task")
elif type == 'create':
if subtype == 'task':
start_point = request['start_point']
end_point = request['end_point']
keyword = request['keyword']
chars = request['chars']
searchwidth = request['searchwidth']
algorithm = request['algorithm']
tasks.append(Task(start_point,end_point,keyword,chars,searchwidth,algorithm,gen_task_id(tasks)))
return webServer.make_json_status(1, "Successfully created new task")
elif type == 'test':
if subtype == 'post':
return webServer.make_json_status(1, "Successful")
#If correct type cannot be found
return webServer.make_json_status(0, "Task Failed")
def make_json_status(status_code, message):
return json.dumps({"status_code":status_code, "status":message})
def add_json_successfull_status(res):
res['status_code'] = 1
res['status'] = 'Successful'
res = json.dumps(res)
return res
if __name__=='__main__':
main()
| ps(get_next_job())
def gen_tas | identifier_body |
controller.py | """
Venter på innkommende meldinger som enten gir oppgaver å jobbe med, eller etterspør et rom å søke gjennom
Tar inn "Kode" som skal knekkes, samt rom det skal søkes i, (muligens størrelse på hver enkelt arbeidoppgave?) og eventuell hashing-algoritme.
Fordeler arbeidoppgaver ved å motta en forespørsel, og sender ut "Kode", tegn, arbeidsnodens søkerom, samt hashing-algoritme.
Tar inn resultater (enten svar, eller beskjed om at koden ikke er i søkerommet den ble gitt), og slutter å dele ut oppgaver basert på den koden om den er funnet (returnerer svar til den som sendte dette inn).
All kommunikasjon skal foregå via JSON api-kall
"""
#Webserver
from http.server import HTTPServer, BaseHTTPRequestHandler
from io import BytesIO
#Other needed packages
import json
from random import random
from math import floor
#Required variables
tasks = []
#Settings
port = 80
address = 'localhost'
def main():
#def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
tasks.append(Task('a','dddd','dddc','abcde',10, 'none', gen_task_id(tasks)))
tasks.append(Task('0','5555','300','012345',10, 'none', gen_task_id(tasks)))
#Starting webServer
print('Starting Webserver on {}:{}'.format(address, port))
httpd = HTTPServer((address, port), webServer)
httpd.serve_forever()
def get_next_job():
for task in tasks:
job = task.get_task_worker()
if not job['finished']:
if job['free_block']:
return job
else:
task.set_finished()
return {
'finished':True,
'free_block':False,
'keyword':'',
'chars':'',
'algorithm':'',
'start_point':'',
'end_point':''
}
def get_next_job_json():
return json.dumps(get_next_job())
def gen_task_id(tasks):
task_id_unique = False
temp_id = floor(random()*10000)
while not task_id_unique:
task_id_unique = True
temp_id = floor(random()*10000)
for task in tasks:
if task.id == temp_id:
task_id_unique = False
return temp_id
class Task:
def __init__(self, start_point, end_point, keyword, chars, searchwidth, algorithm, id):
self.start_point = start_point # Where to start searching
self.end_point = end_point # Where to end the search
self.keyword = keyword # What you're searching for
self.chars = chars # Characters that can be used
self.searchwidth = searchwidth # How large blocks will each worker-node be given at a time
self.algorithm = algorithm # How is keyword hashed/encrypted
self.id = id #ID to connect results to tasks
|
self.current_point = self.start_point # Will be increased as workers are given blocks to search through
self.finished = False
self.keyword_found = ""
def get_task(self):
return {
'id':self.id,
'finished':self.finished,
'algorithm':self.algorithm,
'start_point':self.start_point,
'end_point':self.end_point,
'keyword':self.keyword,
'chars':self.chars,
'searchwidth':self.searchwidth,
'current_point':self.current_point,
'keyword_found':self.keyword_found
}
def get_task_worker(self):
"""
Retrieves all info needed for worker and puts it into a dictionary
"""
start, end = self.get_block()
return {
'task_id':self.id,
'finished':self.finished,
'free_block':(start != end),
'keyword':self.keyword,
'chars':self.chars,
'algorithm':self.algorithm,
'start_point':start,
'end_point':end
}
def get_block(self):
"""
Retrives the next block to be worked on and updates current_point
"""
current_value = self.get_value(self.current_point)
endpoint_value = self.get_value(self.end_point)
worker_start = self.current_point
if (endpoint_value - current_value) > self.searchwidth:
worker_end = self.get_word_from_value(current_value + self.searchwidth - 1)
self.current_point = self.get_word_from_value(current_value + self.searchwidth)
else:
worker_end = self.end_point
self.current_point = worker_end
return worker_start, worker_end
def get_value(self, str):
"""
Beregner verdien fra base x-tall til base 10-tall
Bruker variablene:
str = ordet som det skal beregnes verdi for
self.chars = alle tegnene som brukes (f.eks. "0123456789")
"""
base = len(self.chars)
base_placement = len(str) - 1
value = 0
for symbol in str:
valueChar = self.chars.find(symbol)
value += valueChar * (base ** base_placement)
base_placement -= 1
return value
def get_word_from_value(self, value):
"""
Beregner base-x ordet gitt base-10 verdien, og returnerer denne
Variabler
value = base-10 verdien av kodeordet
str = verdiene kodeordet skal beregnes fra
self.chars = alle tegnene som brukes (f.eks. "abcdefghij")
"""
base = len(self.chars)
str = ""
while value != 0:
remainder = value % base
value = int((value - remainder) / base)
str = self.chars[remainder] + str
return str
def set_finished(self):
self.finished = True
class webServer(BaseHTTPRequestHandler):
def do_GET(self):
"""
Handles standard GET-requests and get_job-requests
"""
path = self.path
status_code, res = webServer.handle_get_msg(path)
self.send_response(status_code)
self.end_headers()
self.wfile.write(res.encode())
def handle_get_msg(path):
if path == "/get_job":
#return 200, get_next_job_json()
job = {'job':get_next_job_json()}
return 200, webServer.add_json_successfull_status(job)
elif path == "/get_tasks":
res = []
for task in tasks:
res.append(task.get_task())
return 200, json.dumps(res)
else:
return 404, 'Hello, world! \n{}'.format(path)
def do_POST(self):
"""
Handles all POST-requests, message is decoded in handle_post_msg()
"""
content_length = int(self.headers['Content-Length'])
body = self.rfile.read(content_length)
response = BytesIO()
try:
res = webServer.handle_post_msg(body)
print(res)
self.send_response(200)
except Exception as e:
print(e)
res = str(e)
self.send_response(500)
self.end_headers()
response.write(res.encode())
self.wfile.write(response.getvalue())
def handle_post_msg(body):
"""
Takes in a request, converts to dictionary and handles request according to type specified in request
Required
type
subtype
Types/subtypes:
get
job
tasks
post
result
create
task
test
post
"""
request = json.loads(body)
type = request['type']
subtype = request['subtype']
if type == 'get':
if subtype == 'job':
job = {'job':get_next_job()}
return webServer.add_json_successfull_status(job)
elif subtype == 'tasks':
res = []
for task in tasks:
res.append(task.get_task())
res = {'tasks':res}
return webServer.add_json_successfull_status(res)
elif type == 'post':
if subtype == 'result':
start_point = request['start_point']
end_point = request['end_point']
found_keyword_bool = request['found_keyword_bool']
keyword_found = request['keyword_found']
task_id = request['task_id']
if found_keyword_bool:
for task in tasks:
if task.id == task_id:
task.set_finished()
task.keyword_found = keyword_found
return webServer.make_json_status(1, "Result delivered successfully")
return webServer.make_json_status(0, "Couldn't find task")
elif type == 'create':
if subtype == 'task':
start_point = request['start_point']
end_point = request['end_point']
keyword = request['keyword']
chars = request['chars']
searchwidth = request['searchwidth']
algorithm = request['algorithm']
tasks.append(Task(start_point,end_point,keyword,chars,searchwidth,algorithm,gen_task_id(tasks)))
return webServer.make_json_status(1, "Successfully created new task")
elif type == 'test':
if subtype == 'post':
return webServer.make_json_status(1, "Successful")
#If correct type cannot be found
return webServer.make_json_status(0, "Task Failed")
def make_json_status(status_code, message):
return json.dumps({"status_code":status_code, "status":message})
def add_json_successfull_status(res):
res['status_code'] = 1
res['status'] = 'Successful'
res = json.dumps(res)
return res
if __name__=='__main__':
main() | random_line_split | |
main.rs | //! # Basic Subclass example
//!
//! This file creates a `GtkApplication` and a `GtkApplicationWindow` subclass
//! and showcases how you can override virtual funcitons such as `startup`
//! and `activate` and how to interact with the GObjects and their private
//! structs.
extern crate gstreamer as gst;
extern crate gstreamer_player as gst_player;
use gst::prelude::*;
use std::sync::{Arc}
#[macro_use]
extern crate glib;
extern crate gio;
extern crate gtk;
extern crate once_cell;
use gio::prelude::*;
use gtk::prelude::*;
use gio::subclass::application::ApplicationImplExt;
use gio::ApplicationFlags;
use glib::subclass;
use glib::subclass::prelude::*;
use glib::translate::*;
use gtk::subclass::prelude::*;
use once_cell::unsync::OnceCell;
use std::cell::Cell;
mod audio_handler;
#[derive(Debug)]
struct WindowWidgets {
headerbar: gtk::HeaderBar,
increment: gtk::Button,
decrement: gtk::Button,
reset: gtk::Button,
label: gtk::Label,
}
// This is the private part of our `SimpleWindow` object.
// Its where state and widgets are stored when they don't
// need to be publicly accesible.
#[derive(Debug)]
pub struct SimpleWindowPrivate {
widgets: OnceCell<WindowWidgets>,
counter: Cell<i64>,
}
impl ObjectSubclass for SimpleWindowPrivate {
const NAME: &'static str = "SimpleWindowPrivate";
type ParentType = gtk::ApplicationWindow;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
widgets: OnceCell::new(),
counter: Cell::new(0),
}
}
}
static MUSIC_FOLDER: &str = "musics";
impl ObjectImpl for SimpleWindowPrivate {
glib_object_impl!();
// Here we are overriding the glib::Objcet::contructed
// method. Its what gets called when we create our Object
// and where we can initialize things.
fn constructed(&self, obj: &glib::Object) {
// ==== MUSIC SELCTOR BOX =====
let combo_box = gtk::ComboBoxTextBuilder::new()
.width_request(50)
.build();
let all_musics = std::fs::read_dir(MUSIC_FOLDER).unwrap().filter(|e| e.is_ok()).map(|e| e.unwrap());
all_musics.enumerate().for_each(|(idx, v)| {
let name = v.path();
let name = name.to_string_lossy().replace(&format!("{}/", MUSIC_FOLDER), "");
println!("{}", name);
combo_box.insert(idx as i32, None, &name);
});
let combo_box = Arc::new(combo_box);
// Audio player handle
let audio_player = Arc::new(audio_handler::AudioHandler::new());
self.parent_constructed(obj);
let self_ = obj.downcast_ref::<SimpleWindow>().unwrap();
// Basic UI elements
let headerbar = gtk::HeaderBar::new();
let increment = gtk::Button::new_with_label("Add meaning to my life");
let reset = gtk::Button::new_with_label("Reset my life");
let no = gtk::Button::new_with_label("no");
let decrement = gtk::Button::new_with_label("Remove meaning from my life ;_;");
let label = gtk::Label::new(Some("What doth life has for you?"));
let bbox = gtk::BoxBuilder::new()
.orientation(gtk::Orientation::Vertical)
.build();
let play_button = gtk::Button::new_with_label("Play");
let pause_button = gtk::Button::new_with_label("Pause");
let tbox = gtk::EntryBuilder::new()
.height_request(10)
.activates_default(true)
.build();
tbox.set_text("I don't know what to do with that textbox DD:");
let test = Arc::new(tbox);
let inner_tbox = test.clone();
test.clone().connect_activate(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
inner_tbox.set_text("WHy u pressed enter DDD:");
priv_.widgets.get().unwrap().label.set_text("WHy u pressed enter DDD:");
}));
bbox.pack_start(test.as_ref(), false, false, 100);
bbox.pack_start(&reset, false, false, 10);
bbox.pack_start(&no, false, false, 10);
bbox.pack_start(&label, false, false, 10);
bbox.pack_start(&play_button, false, false, 10);
bbox.pack_start(&pause_button, false, false, 10);
bbox.pack_start(combo_box.as_ref(), false, false, 10);
headerbar.set_title(Some("This is your life now"));
headerbar.set_show_close_button(true);
headerbar.pack_start(&increment);
headerbar.pack_start(&decrement);
let audio_player_clone = audio_player.clone();
let combo_box_clone = combo_box.clone();
// Music buttons closures
play_button.connect_clicked(move |_| {
let music = combo_box_clone.get_active_text().unwrap();
let music = format!("{}/{}", MUSIC_FOLDER, music.as_str());
audio_player_clone.play_music(music);
});
let audio_player_clone = audio_player.clone();
pause_button.connect_clicked(move |_| {
audio_player_clone.pause_music();
});
// Connect our method `on_increment_clicked` to be called
// when the increment button is clicked.
increment.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_increment_clicked();
}));
decrement.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_decrement_clicked();
}));
reset.connect_clicked(clone!(@weak self_ => move |_| {
println!("Maybe ;___;");
}));
self_.add(&bbox);
// self_.add(&label);
self_.set_titlebar(Some(&headerbar));
self_.set_default_size(640, 480);
self.widgets
.set(WindowWidgets {
headerbar,
label,
increment,
decrement,
reset,
})
.expect("Failed to initialize window state");
}
}
impl SimpleWindowPrivate {
fn on_increment_clicked(&self) {
self.counter.set(self.counter.get() + 1);
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
fn on_decrement_clicked(&self) {
self.counter.set(self.counter.get().wrapping_sub(1));
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
}
impl WidgetImpl for SimpleWindowPrivate {}
impl ContainerImpl for SimpleWindowPrivate {}
impl BinImpl for SimpleWindowPrivate {}
impl WindowImpl for SimpleWindowPrivate {}
impl ApplicationWindowImpl for SimpleWindowPrivate {}
glib_wrapper! {
pub struct SimpleWindow(
Object<subclass::simple::InstanceStruct<SimpleWindowPrivate>,
subclass::simple::ClassStruct<SimpleWindowPrivate>,
SimpleAppWindowClass>)
@extends gtk::Widget, gtk::Container, gtk::Bin, gtk::Window, gtk::ApplicationWindow;
match fn {
get_type => || SimpleWindowPrivate::get_type().to_glib(),
}
}
impl SimpleWindow {
pub fn new(app: >k::Application) -> Self {
glib::Object::new(Self::static_type(), &[("application", app)])
.expect("Failed to create SimpleWindow")
.downcast::<SimpleWindow>()
.expect("Created SimpleWindow is of wrong type")
}
}
#[derive(Debug)]
pub struct SimpleApplicationPrivate {
window: OnceCell<SimpleWindow>,
}
impl ObjectSubclass for SimpleApplicationPrivate {
const NAME: &'static str = "SimpleApplicationPrivate";
type ParentType = gtk::Application;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
window: OnceCell::new(),
}
}
}
impl ObjectImpl for SimpleApplicationPrivate {
glib_object_impl!();
}
// When our application starts, the `startup` signal will be fired.
// This gives us a chance to perform initialisation tasks that are not directly
// related to showing a new window. After this, depending on how
// the application is started, either `activate` or `open` will be called next.
impl ApplicationImpl for SimpleApplicationPrivate {
// `gio::Application::activate` is what gets called when the
// application is launched by the desktop environment and
// aksed to present itself.
fn activate(&self, app: &gio::Application) {
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = priv_
.window
.get()
.expect("Should always be initiliazed in gio_application_startup");
window.show_all();
window.present();
}
// `gio::Application` is bit special. It does not get initialized
// when `new` is called and the object created, but rather
// once the `startup` signal is emitted and the `gio::Application::startup`
// is called.
//
// Due to this, we create and initialize the `SimpleWindow` widget
// here. Widgets can't be created before `startup` has been called.
fn startup(&self, app: &gio::Application) {
self.parent_startup(app);
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = SimpleWindow::new(&app);
priv_
.window
.set(window)
.expect("Failed to initialize application window");
}
}
impl GtkApplicationImpl for SimpleApplicationPrivate {}
glib_wrapper! {
pub struct SimpleApplication(
Object<subclass::simple::InstanceStruct<SimpleApplicationPrivate>,
subclass::simple::ClassStruct<SimpleApplicationPrivate>,
SimpleApplicationClass>)
@extends gio::Application, gtk::Application;
match fn {
get_type => || SimpleApplicationPrivate::get_type().to_glib(),
}
}
impl SimpleApplication {
pub fn new() -> Self |
}
use std::time::Duration;
use std::io::Seek;
use std::io::SeekFrom;
fn main() {
gtk::init().expect("Failed to initialize gtk");
let app = SimpleApplication::new();
let args: Vec<String> = std::env::args().collect();
app.run(&args);
} | {
glib::Object::new(
Self::static_type(),
&[
("application-id", &"org.gtk-rs.SimpleApplication"),
("flags", &ApplicationFlags::empty()),
],
)
.expect("Failed to create SimpleApp")
.downcast()
.expect("Created simpleapp is of wrong type")
} | identifier_body |
main.rs | //! # Basic Subclass example
//!
//! This file creates a `GtkApplication` and a `GtkApplicationWindow` subclass
//! and showcases how you can override virtual funcitons such as `startup`
//! and `activate` and how to interact with the GObjects and their private
//! structs.
extern crate gstreamer as gst;
extern crate gstreamer_player as gst_player;
use gst::prelude::*;
use std::sync::{Arc}
#[macro_use]
extern crate glib;
extern crate gio;
extern crate gtk;
extern crate once_cell;
use gio::prelude::*;
use gtk::prelude::*;
use gio::subclass::application::ApplicationImplExt;
use gio::ApplicationFlags;
use glib::subclass;
use glib::subclass::prelude::*;
use glib::translate::*;
use gtk::subclass::prelude::*;
use once_cell::unsync::OnceCell;
use std::cell::Cell;
mod audio_handler;
#[derive(Debug)]
struct WindowWidgets {
headerbar: gtk::HeaderBar,
increment: gtk::Button,
decrement: gtk::Button,
reset: gtk::Button,
label: gtk::Label,
}
// This is the private part of our `SimpleWindow` object.
// Its where state and widgets are stored when they don't
// need to be publicly accesible.
#[derive(Debug)]
pub struct SimpleWindowPrivate {
widgets: OnceCell<WindowWidgets>,
counter: Cell<i64>,
}
impl ObjectSubclass for SimpleWindowPrivate {
const NAME: &'static str = "SimpleWindowPrivate";
type ParentType = gtk::ApplicationWindow;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
widgets: OnceCell::new(),
counter: Cell::new(0),
}
}
}
static MUSIC_FOLDER: &str = "musics";
impl ObjectImpl for SimpleWindowPrivate {
glib_object_impl!();
// Here we are overriding the glib::Objcet::contructed
// method. Its what gets called when we create our Object
// and where we can initialize things.
fn constructed(&self, obj: &glib::Object) {
// ==== MUSIC SELCTOR BOX =====
let combo_box = gtk::ComboBoxTextBuilder::new()
.width_request(50)
.build();
let all_musics = std::fs::read_dir(MUSIC_FOLDER).unwrap().filter(|e| e.is_ok()).map(|e| e.unwrap());
all_musics.enumerate().for_each(|(idx, v)| {
let name = v.path();
let name = name.to_string_lossy().replace(&format!("{}/", MUSIC_FOLDER), "");
println!("{}", name);
combo_box.insert(idx as i32, None, &name);
});
let combo_box = Arc::new(combo_box);
// Audio player handle
let audio_player = Arc::new(audio_handler::AudioHandler::new());
self.parent_constructed(obj);
let self_ = obj.downcast_ref::<SimpleWindow>().unwrap();
// Basic UI elements
let headerbar = gtk::HeaderBar::new();
let increment = gtk::Button::new_with_label("Add meaning to my life");
let reset = gtk::Button::new_with_label("Reset my life");
let no = gtk::Button::new_with_label("no");
let decrement = gtk::Button::new_with_label("Remove meaning from my life ;_;");
let label = gtk::Label::new(Some("What doth life has for you?"));
let bbox = gtk::BoxBuilder::new()
.orientation(gtk::Orientation::Vertical)
.build();
let play_button = gtk::Button::new_with_label("Play");
let pause_button = gtk::Button::new_with_label("Pause");
let tbox = gtk::EntryBuilder::new()
.height_request(10)
.activates_default(true)
.build();
tbox.set_text("I don't know what to do with that textbox DD:");
let test = Arc::new(tbox);
let inner_tbox = test.clone();
test.clone().connect_activate(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
inner_tbox.set_text("WHy u pressed enter DDD:");
priv_.widgets.get().unwrap().label.set_text("WHy u pressed enter DDD:");
}));
bbox.pack_start(test.as_ref(), false, false, 100);
bbox.pack_start(&reset, false, false, 10);
bbox.pack_start(&no, false, false, 10);
bbox.pack_start(&label, false, false, 10);
bbox.pack_start(&play_button, false, false, 10);
bbox.pack_start(&pause_button, false, false, 10);
bbox.pack_start(combo_box.as_ref(), false, false, 10);
headerbar.set_title(Some("This is your life now"));
headerbar.set_show_close_button(true);
headerbar.pack_start(&increment);
headerbar.pack_start(&decrement);
let audio_player_clone = audio_player.clone();
let combo_box_clone = combo_box.clone();
// Music buttons closures
play_button.connect_clicked(move |_| {
let music = combo_box_clone.get_active_text().unwrap();
let music = format!("{}/{}", MUSIC_FOLDER, music.as_str());
audio_player_clone.play_music(music);
});
let audio_player_clone = audio_player.clone(); | audio_player_clone.pause_music();
});
// Connect our method `on_increment_clicked` to be called
// when the increment button is clicked.
increment.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_increment_clicked();
}));
decrement.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_decrement_clicked();
}));
reset.connect_clicked(clone!(@weak self_ => move |_| {
println!("Maybe ;___;");
}));
self_.add(&bbox);
// self_.add(&label);
self_.set_titlebar(Some(&headerbar));
self_.set_default_size(640, 480);
self.widgets
.set(WindowWidgets {
headerbar,
label,
increment,
decrement,
reset,
})
.expect("Failed to initialize window state");
}
}
impl SimpleWindowPrivate {
fn on_increment_clicked(&self) {
self.counter.set(self.counter.get() + 1);
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
fn on_decrement_clicked(&self) {
self.counter.set(self.counter.get().wrapping_sub(1));
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
}
impl WidgetImpl for SimpleWindowPrivate {}
impl ContainerImpl for SimpleWindowPrivate {}
impl BinImpl for SimpleWindowPrivate {}
impl WindowImpl for SimpleWindowPrivate {}
impl ApplicationWindowImpl for SimpleWindowPrivate {}
glib_wrapper! {
pub struct SimpleWindow(
Object<subclass::simple::InstanceStruct<SimpleWindowPrivate>,
subclass::simple::ClassStruct<SimpleWindowPrivate>,
SimpleAppWindowClass>)
@extends gtk::Widget, gtk::Container, gtk::Bin, gtk::Window, gtk::ApplicationWindow;
match fn {
get_type => || SimpleWindowPrivate::get_type().to_glib(),
}
}
impl SimpleWindow {
pub fn new(app: >k::Application) -> Self {
glib::Object::new(Self::static_type(), &[("application", app)])
.expect("Failed to create SimpleWindow")
.downcast::<SimpleWindow>()
.expect("Created SimpleWindow is of wrong type")
}
}
#[derive(Debug)]
pub struct SimpleApplicationPrivate {
window: OnceCell<SimpleWindow>,
}
impl ObjectSubclass for SimpleApplicationPrivate {
const NAME: &'static str = "SimpleApplicationPrivate";
type ParentType = gtk::Application;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
window: OnceCell::new(),
}
}
}
impl ObjectImpl for SimpleApplicationPrivate {
glib_object_impl!();
}
// When our application starts, the `startup` signal will be fired.
// This gives us a chance to perform initialisation tasks that are not directly
// related to showing a new window. After this, depending on how
// the application is started, either `activate` or `open` will be called next.
impl ApplicationImpl for SimpleApplicationPrivate {
// `gio::Application::activate` is what gets called when the
// application is launched by the desktop environment and
// aksed to present itself.
fn activate(&self, app: &gio::Application) {
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = priv_
.window
.get()
.expect("Should always be initiliazed in gio_application_startup");
window.show_all();
window.present();
}
// `gio::Application` is bit special. It does not get initialized
// when `new` is called and the object created, but rather
// once the `startup` signal is emitted and the `gio::Application::startup`
// is called.
//
// Due to this, we create and initialize the `SimpleWindow` widget
// here. Widgets can't be created before `startup` has been called.
fn startup(&self, app: &gio::Application) {
self.parent_startup(app);
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = SimpleWindow::new(&app);
priv_
.window
.set(window)
.expect("Failed to initialize application window");
}
}
impl GtkApplicationImpl for SimpleApplicationPrivate {}
glib_wrapper! {
pub struct SimpleApplication(
Object<subclass::simple::InstanceStruct<SimpleApplicationPrivate>,
subclass::simple::ClassStruct<SimpleApplicationPrivate>,
SimpleApplicationClass>)
@extends gio::Application, gtk::Application;
match fn {
get_type => || SimpleApplicationPrivate::get_type().to_glib(),
}
}
impl SimpleApplication {
pub fn new() -> Self {
glib::Object::new(
Self::static_type(),
&[
("application-id", &"org.gtk-rs.SimpleApplication"),
("flags", &ApplicationFlags::empty()),
],
)
.expect("Failed to create SimpleApp")
.downcast()
.expect("Created simpleapp is of wrong type")
}
}
use std::time::Duration;
use std::io::Seek;
use std::io::SeekFrom;
fn main() {
gtk::init().expect("Failed to initialize gtk");
let app = SimpleApplication::new();
let args: Vec<String> = std::env::args().collect();
app.run(&args);
} | pause_button.connect_clicked(move |_| { | random_line_split |
main.rs | //! # Basic Subclass example
//!
//! This file creates a `GtkApplication` and a `GtkApplicationWindow` subclass
//! and showcases how you can override virtual funcitons such as `startup`
//! and `activate` and how to interact with the GObjects and their private
//! structs.
extern crate gstreamer as gst;
extern crate gstreamer_player as gst_player;
use gst::prelude::*;
use std::sync::{Arc}
#[macro_use]
extern crate glib;
extern crate gio;
extern crate gtk;
extern crate once_cell;
use gio::prelude::*;
use gtk::prelude::*;
use gio::subclass::application::ApplicationImplExt;
use gio::ApplicationFlags;
use glib::subclass;
use glib::subclass::prelude::*;
use glib::translate::*;
use gtk::subclass::prelude::*;
use once_cell::unsync::OnceCell;
use std::cell::Cell;
mod audio_handler;
#[derive(Debug)]
struct WindowWidgets {
headerbar: gtk::HeaderBar,
increment: gtk::Button,
decrement: gtk::Button,
reset: gtk::Button,
label: gtk::Label,
}
// This is the private part of our `SimpleWindow` object.
// Its where state and widgets are stored when they don't
// need to be publicly accesible.
#[derive(Debug)]
pub struct SimpleWindowPrivate {
widgets: OnceCell<WindowWidgets>,
counter: Cell<i64>,
}
impl ObjectSubclass for SimpleWindowPrivate {
const NAME: &'static str = "SimpleWindowPrivate";
type ParentType = gtk::ApplicationWindow;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
widgets: OnceCell::new(),
counter: Cell::new(0),
}
}
}
static MUSIC_FOLDER: &str = "musics";
impl ObjectImpl for SimpleWindowPrivate {
glib_object_impl!();
// Here we are overriding the glib::Objcet::contructed
// method. Its what gets called when we create our Object
// and where we can initialize things.
fn constructed(&self, obj: &glib::Object) {
// ==== MUSIC SELCTOR BOX =====
let combo_box = gtk::ComboBoxTextBuilder::new()
.width_request(50)
.build();
let all_musics = std::fs::read_dir(MUSIC_FOLDER).unwrap().filter(|e| e.is_ok()).map(|e| e.unwrap());
all_musics.enumerate().for_each(|(idx, v)| {
let name = v.path();
let name = name.to_string_lossy().replace(&format!("{}/", MUSIC_FOLDER), "");
println!("{}", name);
combo_box.insert(idx as i32, None, &name);
});
let combo_box = Arc::new(combo_box);
// Audio player handle
let audio_player = Arc::new(audio_handler::AudioHandler::new());
self.parent_constructed(obj);
let self_ = obj.downcast_ref::<SimpleWindow>().unwrap();
// Basic UI elements
let headerbar = gtk::HeaderBar::new();
let increment = gtk::Button::new_with_label("Add meaning to my life");
let reset = gtk::Button::new_with_label("Reset my life");
let no = gtk::Button::new_with_label("no");
let decrement = gtk::Button::new_with_label("Remove meaning from my life ;_;");
let label = gtk::Label::new(Some("What doth life has for you?"));
let bbox = gtk::BoxBuilder::new()
.orientation(gtk::Orientation::Vertical)
.build();
let play_button = gtk::Button::new_with_label("Play");
let pause_button = gtk::Button::new_with_label("Pause");
let tbox = gtk::EntryBuilder::new()
.height_request(10)
.activates_default(true)
.build();
tbox.set_text("I don't know what to do with that textbox DD:");
let test = Arc::new(tbox);
let inner_tbox = test.clone();
test.clone().connect_activate(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
inner_tbox.set_text("WHy u pressed enter DDD:");
priv_.widgets.get().unwrap().label.set_text("WHy u pressed enter DDD:");
}));
bbox.pack_start(test.as_ref(), false, false, 100);
bbox.pack_start(&reset, false, false, 10);
bbox.pack_start(&no, false, false, 10);
bbox.pack_start(&label, false, false, 10);
bbox.pack_start(&play_button, false, false, 10);
bbox.pack_start(&pause_button, false, false, 10);
bbox.pack_start(combo_box.as_ref(), false, false, 10);
headerbar.set_title(Some("This is your life now"));
headerbar.set_show_close_button(true);
headerbar.pack_start(&increment);
headerbar.pack_start(&decrement);
let audio_player_clone = audio_player.clone();
let combo_box_clone = combo_box.clone();
// Music buttons closures
play_button.connect_clicked(move |_| {
let music = combo_box_clone.get_active_text().unwrap();
let music = format!("{}/{}", MUSIC_FOLDER, music.as_str());
audio_player_clone.play_music(music);
});
let audio_player_clone = audio_player.clone();
pause_button.connect_clicked(move |_| {
audio_player_clone.pause_music();
});
// Connect our method `on_increment_clicked` to be called
// when the increment button is clicked.
increment.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_increment_clicked();
}));
decrement.connect_clicked(clone!(@weak self_ => move |_| {
let priv_ = SimpleWindowPrivate::from_instance(&self_);
priv_.on_decrement_clicked();
}));
reset.connect_clicked(clone!(@weak self_ => move |_| {
println!("Maybe ;___;");
}));
self_.add(&bbox);
// self_.add(&label);
self_.set_titlebar(Some(&headerbar));
self_.set_default_size(640, 480);
self.widgets
.set(WindowWidgets {
headerbar,
label,
increment,
decrement,
reset,
})
.expect("Failed to initialize window state");
}
}
impl SimpleWindowPrivate {
fn | (&self) {
self.counter.set(self.counter.get() + 1);
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
fn on_decrement_clicked(&self) {
self.counter.set(self.counter.get().wrapping_sub(1));
let w = self.widgets.get().unwrap();
w.label
.set_text(&format!("Your life has {} meaning", self.counter.get()));
}
}
impl WidgetImpl for SimpleWindowPrivate {}
impl ContainerImpl for SimpleWindowPrivate {}
impl BinImpl for SimpleWindowPrivate {}
impl WindowImpl for SimpleWindowPrivate {}
impl ApplicationWindowImpl for SimpleWindowPrivate {}
glib_wrapper! {
pub struct SimpleWindow(
Object<subclass::simple::InstanceStruct<SimpleWindowPrivate>,
subclass::simple::ClassStruct<SimpleWindowPrivate>,
SimpleAppWindowClass>)
@extends gtk::Widget, gtk::Container, gtk::Bin, gtk::Window, gtk::ApplicationWindow;
match fn {
get_type => || SimpleWindowPrivate::get_type().to_glib(),
}
}
impl SimpleWindow {
pub fn new(app: >k::Application) -> Self {
glib::Object::new(Self::static_type(), &[("application", app)])
.expect("Failed to create SimpleWindow")
.downcast::<SimpleWindow>()
.expect("Created SimpleWindow is of wrong type")
}
}
#[derive(Debug)]
pub struct SimpleApplicationPrivate {
window: OnceCell<SimpleWindow>,
}
impl ObjectSubclass for SimpleApplicationPrivate {
const NAME: &'static str = "SimpleApplicationPrivate";
type ParentType = gtk::Application;
type Instance = subclass::simple::InstanceStruct<Self>;
type Class = subclass::simple::ClassStruct<Self>;
glib_object_subclass!();
fn new() -> Self {
Self {
window: OnceCell::new(),
}
}
}
impl ObjectImpl for SimpleApplicationPrivate {
glib_object_impl!();
}
// When our application starts, the `startup` signal will be fired.
// This gives us a chance to perform initialisation tasks that are not directly
// related to showing a new window. After this, depending on how
// the application is started, either `activate` or `open` will be called next.
impl ApplicationImpl for SimpleApplicationPrivate {
// `gio::Application::activate` is what gets called when the
// application is launched by the desktop environment and
// aksed to present itself.
fn activate(&self, app: &gio::Application) {
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = priv_
.window
.get()
.expect("Should always be initiliazed in gio_application_startup");
window.show_all();
window.present();
}
// `gio::Application` is bit special. It does not get initialized
// when `new` is called and the object created, but rather
// once the `startup` signal is emitted and the `gio::Application::startup`
// is called.
//
// Due to this, we create and initialize the `SimpleWindow` widget
// here. Widgets can't be created before `startup` has been called.
fn startup(&self, app: &gio::Application) {
self.parent_startup(app);
let app = app.downcast_ref::<gtk::Application>().unwrap();
let priv_ = SimpleApplicationPrivate::from_instance(app);
let window = SimpleWindow::new(&app);
priv_
.window
.set(window)
.expect("Failed to initialize application window");
}
}
impl GtkApplicationImpl for SimpleApplicationPrivate {}
glib_wrapper! {
pub struct SimpleApplication(
Object<subclass::simple::InstanceStruct<SimpleApplicationPrivate>,
subclass::simple::ClassStruct<SimpleApplicationPrivate>,
SimpleApplicationClass>)
@extends gio::Application, gtk::Application;
match fn {
get_type => || SimpleApplicationPrivate::get_type().to_glib(),
}
}
impl SimpleApplication {
pub fn new() -> Self {
glib::Object::new(
Self::static_type(),
&[
("application-id", &"org.gtk-rs.SimpleApplication"),
("flags", &ApplicationFlags::empty()),
],
)
.expect("Failed to create SimpleApp")
.downcast()
.expect("Created simpleapp is of wrong type")
}
}
use std::time::Duration;
use std::io::Seek;
use std::io::SeekFrom;
fn main() {
gtk::init().expect("Failed to initialize gtk");
let app = SimpleApplication::new();
let args: Vec<String> = std::env::args().collect();
app.run(&args);
} | on_increment_clicked | identifier_name |
configfunction.js | //验证数据有效性
function validationData(types,data){
//if(types=="1"||types=="整型"){
if(types=="1"||types=="\u6574\u578B"){
var re=/^-?[0-9]\d*$/;
if(!data.match(re)){
//return "类型为整型的数据值必须是整数。<br>";
return "\u7C7B\u578B\u4E3A\u6574\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6574\u6570\u3002<br>";
}
//}else if(types=="2"||types=="浮点型"){
}else if(types=="2"||types=="\u6D6E\u70B9\u578B"){
var reg1=/^-?([0-9]\d*|0(?!\.0+$))\.\d+?$/;
var reg2=/^-?[0-9]\d*$/;
if(!(reg1.test(data) || reg2.test(data))){
//return "类型为浮点型的数据值必须是浮点型。<br>";
return "\u7C7B\u578B\u4E3A\u6D6E\u70B9\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6D6E\u70B9\u578B\u3002<br>";
}
//}else if(types=="3"||types=="日期型"){
}else if(types=="3"||types=="\u65E5\u671F\u578B"){
var re = /^\d[19|20]\d{2}\-\d{1,2}\-\d{1,2}$/;
if(!data.match(re)){
//return "类型为日期型的数据值必须是日期类型。如 2010-1-2/2010-01-02<br>";
return "\u7C7B\u578B\u4E3A\u65E5\u671F\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u65E5\u671F\u7C7B\u578B\u3002\u5982 2010-1-2/2010-01-02<br>";
}else{
var dateArray = data.split("-");
if(parseInt(dateArray[1],10)>12){
//return "值为'"+data+"'的日期类型数据月份不能超过12个月。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u6708\u4EFD\u4E0D\u80FD\u8D85\u8FC7\u0031\u0032\u4E2A\u6708\u3002<br>";
}
//闰年
if(parseInt(dateArray[0],10)%4==0){
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>29){
//return "值为'"+data+"'的日期类型数据闰年2月份天数不能超过29天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u95F0\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0039\u5929\u3002<br>";
}
}
}else{
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>28){
//return "值为'"+data+"'的日期类型数据平年2月份天数不能超过28天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u5E73\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0038\u5929\u3002<br>";
}
}
}
var dateMonth = "1,3,5,7,8,10,12";
var index = dateMonth.indexOf(parseInt(dateArray[1],10));
if(index==-1){
if(parseInt(dateArray[2],10)>30){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过30天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0030\u5929\u3002<br>";
}
}else{
if(parseInt(dateArray[2],10)>31){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过31天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0031\u5929\u3002<br>";
}
}
if(parseInt(dateArray[2],10)==0){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能为0天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u4E3A\u0030\u5929\u3002<br>";
}
}
//}else if(types=="4"||types=="布尔型"){
}else if(types=="4"||types=="\u5E03\u5C14\u578B"){
if(data!="true" && data!="false" && data!="TRUE" && data!="FALSE"){
//return "类型为布尔型的数据值必须为布尔型。如 true/false<br>";
return "\u7C7B\u578B\u4E3A\u5E03\u5C14\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u4E3A\u5E03\u5C14\u578B\u3002\u5982 true/false<br>";
}
}
return "";
}
/*
*取值
*/
function getValue(){
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
var texts = innerText.replace(new RegExp("<br>|\n|\r","gm"),"").split(",");
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],columnNumber:i,lstConfigValueVO:[]};
jsonHead.push(obj);
}
}
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<columns;j++){
var td = table.rows[i].cells[j];
if(td){
var values = td.getElementsByTagName("INPUT")[0];
var tValue = "";
if(values!=""){
tValue = values.value;
}
//获取属性值创建人
var creatorIdInputElement = td.getElementsByTagName("INPUT")[1];
var creatorId = "";
if(creatorIdInputElement!=""){
creatorId = creatorIdInputElement.value;
if(!creatorId){
creatorId = globalUserId;
}
}
var objValue = {configItemValue:tValue,isDefaultValue:defs,rowNumber:i,creatorId:creatorId};
//放入json对象中
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
return JSON.stringify(jsonHead);
}
//获取基本类型的json值
function getBaseValue(formdata){
var jsonHead = [];
var obj = {attributeName:formdata.configItemName,attributeCode:formdata.configItemFullCode,attributeType:'0',columnNumber:0,lstConfigValueVO:[]};
jsonHead.push(obj);
var configValue = '';
if(formdata.configItemType == '3'){
configValue = formdata.dateConfigValue;
}else if(formdata.configItemType == '4'){
configValue = formdata.booleanConfigValue;
}else{
configValue = formdata.configItemValue;
}
var objValue = {configItemValue:configValue,isDefaultValue:2,rowNumber:0};
jsonHead[0].lstConfigValueVO.push(objValue);
return JSON.stringify(jsonHead);
}
function validateValueField(){
var msg = "";
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
//至少保留一个属性列
if(columns<=1){
//msg+="数据值区域至少应保留一个可填属性的列。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u5C5E\u6027\u7684\u5217\u3002<br>";
}
if(rows<=1){
//msg+="数据值区域至少应保留一个可填值的行。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u503C\u7684\u884C\u3002<br>";
}
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length==1){
//msg+="数据值区域第"+(i+1)+"列题头值不能为空。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}else{
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],lstConfigValueVO:[]};
jsonHead.push(obj);
}
}else{
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}
}
}
if(msg.length>0){
return msg;
}
var msgRowsValidate = "";
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<=count;j++){
//取值
var td = table.rows[i].cells[j];
if(td){
var tempValues = td.getElementsByTagName("INPUT")[0].value;
if(trimSpace(tempValues)!=""){
//校验数据值是否含有不允许的特殊字符
var validateDataRegexInfo = validateDataRegex(trimSpace(tempValues));
if(validateDataRegexInfo){
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+validateDataRegexInfo;
}
flag = false;
//只有值不为空时才进行验证
//验证输入的值是否合法
var attributeType = trimSpace(jsonHead[j-1].attributeType);
var vInfo = validationData(attributeType,trimSpace(tempValues));
if(vInfo!=""){
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列"+vInfo;
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+vInfo;
}
}else{
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列值为空。<br>";
msgRowsValidate += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217\u503C\u4E3A\u7A7A\u3002<br>";
}
var objValue = {configItemValue:trimSpace(tempValues),isDefaultValue:defs,rowNumber:i};
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
//判断编码是否重复
msg+=attributeUnique(jsonHead);
msg = msg + ":" + msgRowsValidate;
return msg;
}
//验证单元格里面的特殊字符问题
function validateDataRegex(data){
var re=/\"/;
if(data.match(re)){
//return "数据值不能包含"。<br>";
return "\u6570\u636E\u503C\u4E0D\u80FD\u5305\u542B\u0022\u3002<br>";
}
}
//div确定按钮事件
function confirmEvent(){
var valid = window.validater.validElement('AREA','#attr');
if(!valid[2]){
return ;
}
var temp = document.getElementById("temp").value;
var attrName = cui('#attrName').getValue();
var attrCode = cui('#attrCode').getValue();
var attrType = cui('#attrType').getText();
var type = attrType;
var msg = '';
if(trimSpace(attrName).length==0){
//msg+="属性名称不能为空<br>";
msg+="\u5C5E\u6027\u540D\u79F0\u4E0D\u80FD\u4E3A\u7A7A<br>";
}
if(trimSpace(attrCode).length==0){
//msg+="属性编码不能为空<br>";
msg+="\u5C5E\u6027\u7F16\u7801\u4E0D\u80FD\u4E3A\u7A7A<br>";
}else{
msg += isRepeat(attrCode,temp);
}
if(msg.length>0){
cui.alert(msg);
return;
}
//var info = "名称:"+trimSpace(attrName)+",编码:"+trimSpace(attrCode)+",类型:"+type;
var info = "\u540D\u79F0:"+trimSpace(attrName)+",\u7F16\u7801:"+trimSpace(attrCode)+",\u7C7B\u578B:"+type;
document.getElementById("hiddenValue"+temp).value = info;
document.getElementById("span"+temp).innerText=cutOutSide(trimSpace(attrName),9);
document.getElementById("span"+temp).title=trimSpace(attrCode);
document.getElementById("attr").style.display='none';
}
//点击div上的确定时判断编码是否重复
function isRepeat(attrCode,index){
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length!=1){
if(i!=index){
if(trimSpace(attrCode)==trimSpace(texts[1].split(":")[1])){
//return "编码和第"+(i+1)+"列重复了";
return "\u7F16\u7801\u548C\u7B2C"+(i+1)+"\u5217\u91CD\u590D\u4E86";
}
}
}
}
}
}
return "";
}
//显示div
function showDiv(index,event){
var scroll = document.body.scrollLeft;
var scrollY = document.body.scrollTop;
document.getElementById("temp").value=index;
var div = document.getElementById("attr");
div.style.display="block";
if(event.clientX<200){
div.style.left=(event.clientX+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}else{
div.style.left=(event.clientX-200+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}
setSelectColumnValue(index);
}
//隐藏div
function hideDiv(){
document.getElementById("attr").style.display = "none";
}
//检验属性编码的唯一性
function attributeUnique(jsonValue){
for(var i=0;i<jsonValue.length;i++){
for(var j=0;j<jsonValue.length;j++){
if(i!=j){
if(jsonValue[i].attributeCode==jsonValue[j].attributeCode){
//return "第"+(i+3)+"列的属性编码和第"+(j+3)+"列的属性编码重复了<br>";
return "\u7B2C"+(i+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u548C\u7B2C"+(j+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u91CD\u590D\u4E86<br>";
}
}
}
}
return "";
}
/**
* 把值转换成json
* @return
*/
function transValueToJson(){
var attrValues = document.getElementsByName("attrValue");
var json = [];
for(var i=0,j=attrValues.length;i<j;i++){
var valuesInfo = attrValues[i].value.split("###");
var tempJson = {columnNum:valuesInfo[0],defaultValue:valuesInfo[1],rowNumber:valuesInfo[2],creatorId:valuesInfo[3],values:valuesInfo[4]};
json.push(tempJson);
}
return json;
}
function addBorder(obj){
obj.className = "input_mouseover";
}
function remov | obj.className = "input_mouseout";
} | eBorder(obj){
| identifier_name |
configfunction.js | //验证数据有效性
function validationData(types,data){
//if(types=="1"||types=="整型"){
if(types=="1"||types=="\u6574\u578B"){
var re=/^-?[0-9]\d*$/;
if(!data.match(re)){
//return "类型为整型的数据值必须是整数。<br>";
return "\u7C7B\u578B\u4E3A\u6574\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6574\u6570\u3002<br>";
}
//}else if(types=="2"||types=="浮点型"){
}else if(types=="2"||types=="\u6D6E\u70B9\u578B"){
var reg1=/^-?([0-9]\d*|0(?!\.0+$))\.\d+?$/;
var reg2=/^-?[0-9]\d*$/;
if(!(reg1.test(data) || reg2.test(data))){
//return "类型为浮点型的数据值必须是浮点型。<br>";
return "\u7C7B\u578B\u4E3A\u6D6E\u70B9\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6D6E\u70B9\u578B\u3002<br>";
}
//}else if(types=="3"||types=="日期型"){
}else if(types=="3"||types=="\u65E5\u671F\u578B"){
var re = /^\d[19|20]\d{2}\-\d{1,2}\-\d{1,2}$/;
if(!data.match(re)){
//return "类型为日期型的数据值必须是日期类型。如 2010-1-2/2010-01-02<br>";
return "\u7C7B\u578B\u4E3A\u65E5\u671F\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u65E5\u671F\u7C7B\u578B\u3002\u5982 2010-1-2/2010-01-02<br>";
}else{
var dateArray = data.split("-");
if(parseInt(dateArray[1],10)>12){
//return "值为'"+data+"'的日期类型数据月份不能超过12个月。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u6708\u4EFD\u4E0D\u80FD\u8D85\u8FC7\u0031\u0032\u4E2A\u6708\u3002<br>";
}
//闰年
if(parseInt(dateArray[0],10)%4==0){
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>29){
//return "值为'"+data+"'的日期类型数据闰年2月份天数不能超过29天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u95F0\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0039\u5929\u3002<br>";
}
}
}else{
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>28){
//return "值为'"+data+"'的日期类型数据平年2月份天数不能超过28天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u5E73\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0038\u5929\u3002<br>";
}
}
}
var dateMonth = "1,3,5,7,8,10,12";
var index = dateMonth.indexOf(parseInt(dateArray[1],10));
if(index==-1){
if(parseInt(dateArray[2],10)>30){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过30天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0030\u5929\u3002<br>";
}
}else{
if(parseInt(dateArray[2],10)>31){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过31天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0031\u5929\u3002<br>";
}
}
if(parseInt(dateArray[2],10)==0){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能为0天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u4E3A\u0030\u5929\u3002<br>";
}
}
//}else if(types=="4"||types=="布尔型"){
}else if(types=="4"||types=="\u5E03\u5C14\u578B"){
if(data!="true" && data!="false" && data!="TRUE" && data!="FALSE"){
//return "类型为布尔型的数据值必须为布尔型。如 true/false<br>";
return "\u7C7B\u578B\u4E3A\u5E03\u5C14\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u4E3A\u5E03\u5C14\u578B\u3002\u5982 true/false<br>";
}
}
return "";
}
/*
*取值
*/
function getValue(){
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
var texts = innerText.replace(new RegExp("<br>|\n|\r","gm"),"").split(",");
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],columnNumber:i,lstConfigValueVO:[]};
jsonHead.push(obj);
}
}
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<columns;j++){
var td = table.rows[i].cells[j];
if(td){
var values = td.getElementsByTagName("INPUT")[0];
var tValue = "";
if(values!=""){
tValue = values.value;
}
//获取属性值创建人
var creatorIdInputElement = td.getElementsByTagName("INPUT")[1];
var creatorId = "";
if(creatorIdInputElement!=""){
creatorId = creatorIdInputElement.value;
if(!creatorId){
creatorId = globalUserId;
}
}
var objValue = {configItemValue:tValue,isDefaultValue:defs,rowNumber:i,creatorId:creatorId};
//放入json对象中
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
return JSON.stringify(jsonHead);
}
//获取基本类型的json值
function getBaseValue(formdata){
var jsonHead = [];
var obj = {attributeName:formdata.configItemName,attributeCode:formdata.configItemFullCode,attributeType:'0',columnNumber:0,lstConfigValueVO:[]};
jsonHead.push(obj);
var configValue = '';
if(formdata.configItemType == '3'){
configValue = formdata.dateConfigValue;
}else if(formdata.configItemType == '4'){
configValue = formdata.booleanConfigValue;
}else{
configValue = formdata.configItemValue;
}
var objValue = {configItemValue:configValue,isDefaultValue:2,rowNumber:0};
jsonHead[0].lstConfigValueVO.push(objValue);
return JSON.stringify(jsonHead);
}
function validateValueField(){
var msg = "";
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
//至少保留一个属性列
if(columns<=1){
//msg+="数据值区域至少应保留一个可填属性的列。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u5C5E\u6027\u7684\u5217\u3002<br>";
}
if(rows<=1){
//msg+="数据值区域至少应保留一个可填值的行。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u503C\u7684\u884C\u3002<br>";
}
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length==1){
//msg+="数据值区域第"+(i+1)+"列题头值不能为空。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}else{
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],lstConfigValueVO:[]};
jsonHead.push(obj);
}
}else{
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}
}
}
if(msg.length>0){
return msg;
}
var msgRowsValidate = "";
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<=count;j++){
//取值
var td = table.rows[i].cells[j]; | var tempValues = td.getElementsByTagName("INPUT")[0].value;
if(trimSpace(tempValues)!=""){
//校验数据值是否含有不允许的特殊字符
var validateDataRegexInfo = validateDataRegex(trimSpace(tempValues));
if(validateDataRegexInfo){
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+validateDataRegexInfo;
}
flag = false;
//只有值不为空时才进行验证
//验证输入的值是否合法
var attributeType = trimSpace(jsonHead[j-1].attributeType);
var vInfo = validationData(attributeType,trimSpace(tempValues));
if(vInfo!=""){
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列"+vInfo;
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+vInfo;
}
}else{
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列值为空。<br>";
msgRowsValidate += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217\u503C\u4E3A\u7A7A\u3002<br>";
}
var objValue = {configItemValue:trimSpace(tempValues),isDefaultValue:defs,rowNumber:i};
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
//判断编码是否重复
msg+=attributeUnique(jsonHead);
msg = msg + ":" + msgRowsValidate;
return msg;
}
//验证单元格里面的特殊字符问题
function validateDataRegex(data){
var re=/\"/;
if(data.match(re)){
//return "数据值不能包含"。<br>";
return "\u6570\u636E\u503C\u4E0D\u80FD\u5305\u542B\u0022\u3002<br>";
}
}
//div确定按钮事件
function confirmEvent(){
var valid = window.validater.validElement('AREA','#attr');
if(!valid[2]){
return ;
}
var temp = document.getElementById("temp").value;
var attrName = cui('#attrName').getValue();
var attrCode = cui('#attrCode').getValue();
var attrType = cui('#attrType').getText();
var type = attrType;
var msg = '';
if(trimSpace(attrName).length==0){
//msg+="属性名称不能为空<br>";
msg+="\u5C5E\u6027\u540D\u79F0\u4E0D\u80FD\u4E3A\u7A7A<br>";
}
if(trimSpace(attrCode).length==0){
//msg+="属性编码不能为空<br>";
msg+="\u5C5E\u6027\u7F16\u7801\u4E0D\u80FD\u4E3A\u7A7A<br>";
}else{
msg += isRepeat(attrCode,temp);
}
if(msg.length>0){
cui.alert(msg);
return;
}
//var info = "名称:"+trimSpace(attrName)+",编码:"+trimSpace(attrCode)+",类型:"+type;
var info = "\u540D\u79F0:"+trimSpace(attrName)+",\u7F16\u7801:"+trimSpace(attrCode)+",\u7C7B\u578B:"+type;
document.getElementById("hiddenValue"+temp).value = info;
document.getElementById("span"+temp).innerText=cutOutSide(trimSpace(attrName),9);
document.getElementById("span"+temp).title=trimSpace(attrCode);
document.getElementById("attr").style.display='none';
}
//点击div上的确定时判断编码是否重复
function isRepeat(attrCode,index){
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length!=1){
if(i!=index){
if(trimSpace(attrCode)==trimSpace(texts[1].split(":")[1])){
//return "编码和第"+(i+1)+"列重复了";
return "\u7F16\u7801\u548C\u7B2C"+(i+1)+"\u5217\u91CD\u590D\u4E86";
}
}
}
}
}
}
return "";
}
//显示div
function showDiv(index,event){
var scroll = document.body.scrollLeft;
var scrollY = document.body.scrollTop;
document.getElementById("temp").value=index;
var div = document.getElementById("attr");
div.style.display="block";
if(event.clientX<200){
div.style.left=(event.clientX+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}else{
div.style.left=(event.clientX-200+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}
setSelectColumnValue(index);
}
//隐藏div
function hideDiv(){
document.getElementById("attr").style.display = "none";
}
//检验属性编码的唯一性
function attributeUnique(jsonValue){
for(var i=0;i<jsonValue.length;i++){
for(var j=0;j<jsonValue.length;j++){
if(i!=j){
if(jsonValue[i].attributeCode==jsonValue[j].attributeCode){
//return "第"+(i+3)+"列的属性编码和第"+(j+3)+"列的属性编码重复了<br>";
return "\u7B2C"+(i+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u548C\u7B2C"+(j+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u91CD\u590D\u4E86<br>";
}
}
}
}
return "";
}
/**
* 把值转换成json
* @return
*/
function transValueToJson(){
var attrValues = document.getElementsByName("attrValue");
var json = [];
for(var i=0,j=attrValues.length;i<j;i++){
var valuesInfo = attrValues[i].value.split("###");
var tempJson = {columnNum:valuesInfo[0],defaultValue:valuesInfo[1],rowNumber:valuesInfo[2],creatorId:valuesInfo[3],values:valuesInfo[4]};
json.push(tempJson);
}
return json;
}
function addBorder(obj){
obj.className = "input_mouseover";
}
function removeBorder(obj){
obj.className = "input_mouseout";
} | if(td){ | random_line_split |
configfunction.js | //验证数据有效性
function validationData(types,data){
//if(types=="1"||types=="整型"){
if(types=="1"||types=="\u6574\u578B"){
var re=/^-?[0-9]\d*$/;
if(!data.match(re)){
//return "类型为整型的数据值必须是整数。<br>";
return "\u7C7B\u578B\u4E3A\u6574\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6574\u6570\u3002<br>";
}
//}else if(types=="2"||types=="浮点型"){
}else if(types=="2"||types=="\u6D6E\u70B9\u578B"){
var reg1=/^-?([0-9]\d*|0(?!\.0+$))\.\d+?$/;
var reg2=/^-?[0-9]\d*$/;
if(!(reg1.test(data) || reg2.test(data))){
//return "类型为浮点型的数据值必须是浮点型。<br>";
return "\u7C7B\u578B\u4E3A\u6D6E\u70B9\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6D6E\u70B9\u578B\u3002<br>";
}
//}else if(types=="3"||types=="日期型"){
}else if(types=="3"||types=="\u65E5\u671F\u578B"){
var re = /^\d[19|20]\d{2}\-\d{1,2}\-\d{1,2}$/;
if(!data.match(re)){
//return "类型为日期型的数据值必须是日期类型。如 2010-1-2/2010-01-02<br>";
return "\u7C7B\u578B\u4E3A\u65E5\u671F\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u65E5\u671F\u7C7B\u578B\u3002\u5982 2010-1-2/2010-01-02<br>";
}else{
var dateArray = data.split("-");
if(parseInt(dateArray[1],10)>12){
//return "值为'"+data+"'的日期类型数据月份不能超过12个月。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u6708\u4EFD\u4E0D\u80FD\u8D85\u8FC7\u0031\u0032\u4E2A\u6708\u3002<br>";
}
//闰年
if(parseInt(dateArray[0],10)%4==0){
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>29){
//return "值为'"+data+"'的日期类型数据闰年2月份天数不能超过29天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u95F0\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0039\u5929\u3002<br>";
}
}
}else{
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>28){
//return "值为'"+data+"'的日期类型数据平年2月份天数不能超过28天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u5E73\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0038\u5929\u3002<br>";
}
}
}
var dateMonth = "1,3,5,7,8,10,12";
var index = dateMonth.indexOf(parseInt(dateArray[1],10));
if(index==-1){
if(parseInt(dateArray[2],10)>30){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过30天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0030\u5929\u3002<br>";
}
}else{
if(parseInt(dateArray[2],10)>31){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过31天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0031\u5929\u3002<br>";
}
}
if(parseInt(dateArray[2],10)==0){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能为0天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u4E3A\u0030\u5929\u3002<br>";
}
}
//}else if(types=="4"||types=="布尔型"){
}else if(types=="4"||types=="\u5E03\u5C14\u578B"){
if(data!="true" && data!="false" && data!="TRUE" && data!="FALSE"){
//return "类型为布尔型的数据值必须为布尔型。如 true/false<br>";
return "\u7C7B\u578B\u4E3A\u5E03\u5C14\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u4E3A\u5E03\u5C14\u578B\u3002\u5982 true/false<br>";
}
}
return "";
}
/*
*取值
*/
function getValue(){
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
var texts = innerText.replace(new RegExp("<br>|\n|\r","gm"),"").split(",");
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],columnNumber:i,lstConfigValueVO:[]};
jsonHead.push(obj);
}
}
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<columns;j++){
var td = table.rows[i].cells[j];
if(td){
var values = td.getElementsByTagName("INPUT")[0];
var tValue = "";
if(values!=""){
tValue = values.value;
}
//获取属性值创建人
var creatorIdInputElement = td.getElementsByTagName("INPUT")[1];
var creatorId = "";
if(creatorIdInputElement!=""){
creatorId = creatorIdInputElement.value;
if(!creatorId){
creatorId = globalUserId;
}
}
var objValue = {configItemValue:tValue,isDefaultValue:defs,rowNumber:i,creatorId:creatorId};
//放入json对象中
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
return JSON.stringify(jsonHead);
}
//获取基本类型的json值
function getBaseValue(formdata){
var jsonHead = [];
var obj = {attributeName:formdata.configItemName,attributeCode:formdata.configItemFullCode,attributeType:'0',columnNumber:0,lstConfigValueVO:[]};
jsonHead.push(obj);
var configValue = '';
if(formdata.configItemType == '3'){
configValue = formdata.dateConfigValue;
}else if(formdata.configItemType == '4'){
configValue = formdata.booleanConfigValue;
}else{
configValue = formdata.configItemValue;
}
var objValue = {configItemValue:configValue,isDefaultValue:2,rowNumber:0};
jsonHead[0].lstConfigValueVO.push(objValue);
return JSON.stringify(jsonHead);
}
function validateValueField(){
var msg = "";
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
//至少保留一个属性列
if(columns<=1){
//msg+="数据值区域至少应保留一个可填属性的列。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u5C5E\u6027\u7684\u5217\u3002<br>";
}
if(rows<=1){
//msg+="数据值区域至少应保留一个可填值的行。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u503C\u7684\u884C\u3002<br>";
}
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length==1){
//msg+="数据值区域第"+(i+1)+"列题头值不能为空。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}else{
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],lstConfigValueVO:[]};
jsonHead.push(obj);
}
}else{
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}
}
}
if(msg.length>0){
return msg;
}
var msgRowsValidate = "";
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<=count;j++){
//取值
var td = table.rows[i].cells[j];
if(td){
var tempValues = td.getElementsByTagName("INPUT")[0].value;
if(trimSpace(tempValues)!=""){
//校验数据值是否含有不允许的特殊字符
var validateDataRegexInfo = validateDataRegex(trimSpace(tempValues));
if(validateDataRegexInfo){
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+validat | 40D\u79F0\u4E0D\u80FD\u4E3A\u7A7A<br>";
}
if(trimSpace(attrCode).length==0){
//msg+="属性编码不能为空<br>";
msg+="\u5C5E\u6027\u7F16\u7801\u4E0D\u80FD\u4E3A\u7A7A<br>";
}else{
msg += isRepeat(attrCode,temp);
}
if(msg.length>0){
cui.alert(msg);
return;
}
//var info = "名称:"+trimSpace(attrName)+",编码:"+trimSpace(attrCode)+",类型:"+type;
var info = "\u540D\u79F0:"+trimSpace(attrName)+",\u7F16\u7801:"+trimSpace(attrCode)+",\u7C7B\u578B:"+type;
document.getElementById("hiddenValue"+temp).value = info;
document.getElementById("span"+temp).innerText=cutOutSide(trimSpace(attrName),9);
document.getElementById("span"+temp).title=trimSpace(attrCode);
document.getElementById("attr").style.display='none';
}
//点击div上的确定时判断编码是否重复
function isRepeat(attrCode,index){
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length!=1){
if(i!=index){
if(trimSpace(attrCode)==trimSpace(texts[1].split(":")[1])){
//return "编码和第"+(i+1)+"列重复了";
return "\u7F16\u7801\u548C\u7B2C"+(i+1)+"\u5217\u91CD\u590D\u4E86";
}
}
}
}
}
}
return "";
}
//显示div
function showDiv(index,event){
var scroll = document.body.scrollLeft;
var scrollY = document.body.scrollTop;
document.getElementById("temp").value=index;
var div = document.getElementById("attr");
div.style.display="block";
if(event.clientX<200){
div.style.left=(event.clientX+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}else{
div.style.left=(event.clientX-200+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}
setSelectColumnValue(index);
}
//隐藏div
function hideDiv(){
document.getElementById("attr").style.display = "none";
}
//检验属性编码的唯一性
function attributeUnique(jsonValue){
for(var i=0;i<jsonValue.length;i++){
for(var j=0;j<jsonValue.length;j++){
if(i!=j){
if(jsonValue[i].attributeCode==jsonValue[j].attributeCode){
//return "第"+(i+3)+"列的属性编码和第"+(j+3)+"列的属性编码重复了<br>";
return "\u7B2C"+(i+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u548C\u7B2C"+(j+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u91CD\u590D\u4E86<br>";
}
}
}
}
return "";
}
/**
* 把值转换成json
* @return
*/
function transValueToJson(){
var attrValues = document.getElementsByName("attrValue");
var json = [];
for(var i=0,j=attrValues.length;i<j;i++){
var valuesInfo = attrValues[i].value.split("###");
var tempJson = {columnNum:valuesInfo[0],defaultValue:valuesInfo[1],rowNumber:valuesInfo[2],creatorId:valuesInfo[3],values:valuesInfo[4]};
json.push(tempJson);
}
return json;
}
function addBorder(obj){
obj.className = "input_mouseover";
}
function removeBorder(obj){
obj.className = "input_mouseout";
} | eDataRegexInfo;
}
flag = false;
//只有值不为空时才进行验证
//验证输入的值是否合法
var attributeType = trimSpace(jsonHead[j-1].attributeType);
var vInfo = validationData(attributeType,trimSpace(tempValues));
if(vInfo!=""){
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列"+vInfo;
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+vInfo;
}
}else{
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列值为空。<br>";
msgRowsValidate += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217\u503C\u4E3A\u7A7A\u3002<br>";
}
var objValue = {configItemValue:trimSpace(tempValues),isDefaultValue:defs,rowNumber:i};
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
//判断编码是否重复
msg+=attributeUnique(jsonHead);
msg = msg + ":" + msgRowsValidate;
return msg;
}
//验证单元格里面的特殊字符问题
function validateDataRegex(data){
var re=/\"/;
if(data.match(re)){
//return "数据值不能包含"。<br>";
return "\u6570\u636E\u503C\u4E0D\u80FD\u5305\u542B\u0022\u3002<br>";
}
}
//div确定按钮事件
function confirmEvent(){
var valid = window.validater.validElement('AREA','#attr');
if(!valid[2]){
return ;
}
var temp = document.getElementById("temp").value;
var attrName = cui('#attrName').getValue();
var attrCode = cui('#attrCode').getValue();
var attrType = cui('#attrType').getText();
var type = attrType;
var msg = '';
if(trimSpace(attrName).length==0){
//msg+="属性名称不能为空<br>";
msg+="\u5C5E\u6027\u5 | conditional_block |
configfunction.js | //验证数据有效性
function validationData(types,data){
//if(types=="1"||types=="整型"){
if(types=="1"||types=="\u6574\u578B"){
var re=/^-?[0-9]\d*$/;
if(!data.match(re)){
//return "类型为整型的数据值必须是整数。<br>";
return "\u7C7B\u578B\u4E3A\u6574\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6574\u6570\u3002<br>";
}
//}else if(types=="2"||types=="浮点型"){
}else if(types=="2"||types=="\u6D6E\u70B9\u578B"){
var reg1=/^-?([0-9]\d*|0(?!\.0+$))\.\d+?$/;
var reg2=/^-?[0-9]\d*$/;
if(!(reg1.test(data) || reg2.test(data))){
//return "类型为浮点型的数据值必须是浮点型。<br>";
return "\u7C7B\u578B\u4E3A\u6D6E\u70B9\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u6D6E\u70B9\u578B\u3002<br>";
}
//}else if(types=="3"||types=="日期型"){
}else if(types=="3"||types=="\u65E5\u671F\u578B"){
var re = /^\d[19|20]\d{2}\-\d{1,2}\-\d{1,2}$/;
if(!data.match(re)){
//return "类型为日期型的数据值必须是日期类型。如 2010-1-2/2010-01-02<br>";
return "\u7C7B\u578B\u4E3A\u65E5\u671F\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u662F\u65E5\u671F\u7C7B\u578B\u3002\u5982 2010-1-2/2010-01-02<br>";
}else{
var dateArray = data.split("-");
if(parseInt(dateArray[1],10)>12){
//return "值为'"+data+"'的日期类型数据月份不能超过12个月。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u6708\u4EFD\u4E0D\u80FD\u8D85\u8FC7\u0031\u0032\u4E2A\u6708\u3002<br>";
}
//闰年
if(parseInt(dateArray[0],10)%4==0){
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>29){
//return "值为'"+data+"'的日期类型数据闰年2月份天数不能超过29天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u95F0\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0039\u5929\u3002<br>";
}
}
}else{
if(parseInt(dateArray[1],10)==2){
if(parseInt(dateArray[2],10)>28){
//return "值为'"+data+"'的日期类型数据平年2月份天数不能超过28天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E\u5E73\u5E74\u0032\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0032\u0038\u5929\u3002<br>";
}
}
}
var dateMonth = "1,3,5,7,8,10,12";
var index = dateMonth.indexOf(parseInt(dateArray[1],10));
if(index==-1){
if(parseInt(dateArray[2],10)>30){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过30天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0030\u5929\u3002<br>";
}
}else{
if(parseInt(dateArray[2],10)>31){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能超过31天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u8D85\u8FC7\u0033\u0031\u5929\u3002<br>";
}
}
if(parseInt(dateArray[2],10)==0){
//return "值为'"+data+"'的日期类型数据"+parseInt(dateArray[1],10)+"月份天数不能为0天。<br>";
return "\u503C\u4E3A'"+data+"'\u7684\u65E5\u671F\u7C7B\u578B\u6570\u636E"+parseInt(dateArray[1],10)+"\u6708\u4EFD\u5929\u6570\u4E0D\u80FD\u4E3A\u0030\u5929\u3002<br>";
}
}
//}else if(types=="4"||types=="布尔型"){
}else if(types=="4"||types=="\u5E03\u5C14\u578B"){
if(data!="true" && data!="false" && data!="TRUE" && data!="FALSE"){
//return "类型为布尔型的数据值必须为布尔型。如 true/false<br>";
return "\u7C7B\u578B\u4E3A\u5E03\u5C14\u578B\u7684\u6570\u636E\u503C\u5FC5\u987B\u4E3A\u5E03\u5C14\u578B\u3002\u5982 true/false<br>";
}
}
return "";
}
/*
*取值
*/
function getValue(){
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
var texts = innerText.replace(new RegExp("<br>|\n|\r","gm"),"").split(",");
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],columnNumber:i,lstConfigValueVO:[]};
jsonHead.push(obj);
}
}
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<columns;j++){
var td = table.rows[i].cells[j];
if(td){
var values = td.getElementsByTagName("INPUT")[0];
var tValue = "";
if(values!=""){
tValue = values.value;
}
//获取属性值创建人
var creatorIdInputElement = td.getElementsByTagName("INPUT")[1];
var creatorId = "";
if(creatorIdInputElement!=""){
creatorId = creatorIdInputElement.value;
if(!creatorId){
creatorId = globalUserId;
}
}
var objValue = {configItemValue:tValue,isDefaultValue:defs,rowNumber:i,creatorId:creatorId};
//放入json对象中
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
return JSON.stringify(jsonHead);
}
//获取基本类型的json值
function getBaseValue(formdata){
var jsonHead = [];
var obj = {attributeName:formdata.configItemName,attributeCode:formdata.configItemFullCode,attributeType:'0',columnNumber:0,lstConfigValueVO:[]};
jsonHead.push(obj);
var configValue = '';
if(formdata.configItemType == '3'){
configValue = formdata.dateConfigValue;
}else if(formdata.configItemType == '4'){
configValue = formdata.booleanConfigValue;
}else{
configValue = formdata.configItemValue;
}
var objValue = {configItemValue:configValue,isDefaultValue:2,rowNumber:0};
jsonHead[0].lstConfigValueVO.push(objValue);
return JSON.stringify(jsonHead);
}
function validateValueField(){
var msg = "";
var jsonHead = [];
//获取tabl对象
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
//至少保留一个属性列
if(columns<=1){
//msg+="数据值区域至少应保留一个可填属性的列。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u5C5E\u6027\u7684\u5217\u3002<br>";
}
if(rows<=1){
//msg+="数据值区域至少应保留一个可填值的行。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u81F3\u5C11\u5E94\u4FDD\u7559\u4E00\u4E2A\u53EF\u586B\u503C\u7684\u884C\u3002<br>";
}
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length==1){
//msg+="数据值区域第"+(i+1)+"列题头值不能为空。<br>";
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}else{
var obj={attributeName:texts[0].split(":")[1],attributeCode:texts[1].split(":")[1],attributeType:texts[2].split(":")[1],lstConfigValueVO:[]};
jsonHead.push(obj);
}
}else{
msg+="\u6570\u636E\u503C\u533A\u57DF\u7B2C"+(i+1)+"\u5217\u9898\u5934\u503C\u4E0D\u80FD\u4E3A\u7A7A\u3002<br>";
}
}
}
if(msg.length>0){
return msg;
}
var msgRowsValidate = "";
for(var i=1;i<rows;i++){
var defs;
var def = table.rows[i].cells[0].getElementsByTagName("IMG")[1];
if(def.src.search("undefault")=="-1"){
defs ="1";
}else{
defs ="2";
}
var flag = true;
for(var j=1;j<=count;j++){
//取值
var td = table.rows[i].cells[j];
if(td){
var tempValues = td.getElementsByTagName("INPUT")[0].value;
if(trimSpace(tempValues)!=""){
//校验数据值是否含有不允许的特殊字符
var validateDataRegexInfo = validateDataRegex(trimSpace(tempValues));
if(validateDataRegexInfo){
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+validateDataRegexInfo;
}
flag = false;
//只有值不为空时才进行验证
//验证输入的值是否合法
var attributeType = trimSpace(jsonHead[j-1].attributeType);
var vInfo = validationData(attributeType,trimSpace(tempValues));
if(vInfo!=""){
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列"+vInfo;
msg += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217"+vInfo;
}
}else{
//msg += "数据值区域第"+i+"行,第"+(j+1)+"列值为空。<br>";
msgRowsValidate += "\u6570\u636E\u503C\u533A\u57DF\u7B2C"+i+"\u884C\u002C\u7B2C"+(j+1)+"\u5217\u503C\u4E3A\u7A7A\u3002<br>";
}
var objValue = {configItemValue:trimSpace(tempValues),isDefaultValue:defs,rowNumber:i};
jsonHead[j-1].lstConfigValueVO.push(objValue);
}
}
}
//判断编码是否重复
msg+=attributeUnique(jsonHead);
msg = msg + ":" + msgRowsValidate;
return msg;
}
//验证单元格里面的特殊字符问题
function validateDataRegex(data){
var re=/\"/;
if(data.match(re)){
//return "数据值不能包含"。<br>";
return "\u6570\u636E\u503C\u4E0D\u80FD\u5305\u542B\u0022\u3002<br>";
}
}
//div确定按钮事件
function confirmEvent(){
var valid = window.validater.validElement('AREA','#attr');
if(!valid[2]){
return ;
}
var temp = document.getElementById("temp").value;
var attrName = cui('#attrName').getValue();
var attrCode = cui('#attrCode').getValue();
var attrType = cui('#attrType').getText();
var type = attrType;
var msg = '';
if(trimSpace(attrName).length==0){
//msg+="属性名称不能为空<br>";
msg+="\u5C5E\u6027\u540D\u79F0\u4E0D\u80FD\u4E3A\u7A7A<br>";
}
if(trimSpace(attrCode).length==0){
//msg+="属性编码不能为空<br>";
msg+="\u5C5E\u6027\u7F16\u7801\u4E0D\u80FD\u4E3A\u7A7A<br>";
}else{
msg += isRepeat(attrCode,temp);
}
if(msg.length>0){
cui.alert(msg);
return;
}
//var info = "名称:"+trimSpace(attrName)+",编码:"+trimSpace(attrCode)+",类型:"+type;
var info = "\u540D\u79F0:"+trimSpace(attrName)+",\u7F16\u7801:"+trimSpace(attrCode)+",\u7C7B\u578B:"+type;
document.getElementById("hiddenValue"+temp).value = info;
document.getElementById("span"+temp).innerText=cutOutSide(trimSpace(attrName),9);
document.getElementById("span"+temp).title=trimSpace(attrCode);
document.getElementById("attr").style.display='none';
}
//点击div上的确定时判断编码是否重复
function isRepeat(attrCode,index){
var table = document.getElementById("tdList");
var rows = table.rows.length;
var columns = table.rows[0].cells.length;
for(var i=1;i<=count;i++){
var hiddenObj = document.getElementById('hiddenValue'+i);
if(hiddenObj){
var innerText = hiddenObj.value;
if(innerText){
var texts = innerText.split(",");
if(texts[0].split(":").length!=1){
if(i!=index){
if(trimSpace(attrCode)==trimSpace(texts[1].split(":")[1])){
//return "编码和第"+(i+1)+"列重复了";
return "\u7F16\u7801\u548C\u7B2C"+(i+1)+"\u5217\u91CD\u590D\u4E86";
}
}
}
}
}
}
return "";
}
//显示div
function showDiv(index,event){
var scroll = document.body.scrollLeft;
var scrollY = document.body.scrollTop;
document.getElementById("temp").value=index;
var div = document.getElementById("attr");
div.style.display="block";
if(event.clientX<200){
div.style.left=(event.clientX+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}else{
div.style.left=(event.clientX-200+scroll) + 'px';
if(scrollY>0){
div.style.top=(event.clientY+scrollY) + 'px';
}else{
div.style.top=event.clientY + 'px';
}
}
setSelectColumnValue(index);
}
//隐藏div
function hideDiv(){
document.getElementById("attr").style.display = "none";
}
//检验属性编码的唯一性
function attributeUnique(jsonValue){
for(var i=0;i<jsonValue.length;i++){
for(var j=0;j<jsonValue.length;j++){
if(i!=j){
if(jsonValue[i].attributeCode==jsonValue[j].attributeCode){
//return "第"+(i+3)+"列的属性编码和第"+(j+3)+"列的属性编码重复了<br>";
return "\u7B2C"+(i+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u548C\u7B2C"+(j+3)+"\u5217\u7684\u5C5E\u6027\u7F16\u7801\u91CD\u590D\u4E86<br>";
}
}
}
}
return "";
}
/**
* 把值转换成json
* @return
*/
function transValueToJson(){
var attrValues = document.getElementsByName("attrValue");
var json = [];
for(var i=0,j=attrValues.length;i<j;i++){
var valuesInfo = attrValues[i].value.split("###");
var tempJson = {columnNum:valuesInfo[0],defaultValue:valuesInfo[1],rowNumber:valuesInfo[2],creatorId:valuesInfo[3],values:valuesInfo[4]};
json.push(tempJson);
}
return json;
}
function addBorder(obj){
obj.className = "input_mouseover";
}
function removeBorder(obj){
obj.className = "input_mouseout";
} | identifier_body | ||
lstm.py |
import pandas as pd
from sklearn import model_selection, preprocessing, linear_model, metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
df=pd.read_csv('yelp_academic_dataset_review.csv',nrows=100000)
# df.head()
# print(df.shape)
df_filtered=df[df['stars'] !=3]
# print(df_filtered.shape)
#print(df_filtered.describe().T)
text=list(df_filtered['text'])
stars=list(df_filtered['stars'])
print(type(text))
label=[]
for item in stars:
if item>= 4:
y=1
else:
y=0
label.append(y)
label=np.array(label)
#we can get punctuation from string library
from string import punctuation
print(punctuation)
all_reviews=[]
for item in text:
item = item.lower()
item = "".join([ch for ch in item if ch not in punctuation])
all_reviews.append(item)
all_text = " ".join(all_reviews)
print(all_text[0:20])
all_words = all_text.split()
print(all_words[0:10])
from collections import Counter
# Count all the words using Counter Method
count_words = Counter(all_words)
total_words=len(all_words)
sorted_words=count_words.most_common(total_words)
#print(sorted_words[:30])
vocab_to_int={w:i+1 for i,(w,c) in enumerate(sorted_words)}
#print(vocab_to_int)
#reviews_ints = []
#for review in all_words:
#reviews_ints.append([vocab_to_int[word] for word in all_words])
encoded_reviews=list()
for review in all_reviews:
encoded_review=list()
for word in review.split():
if word not in vocab_to_int.keys():
#if word is not available in vocab_to_int put 0 in that place
encoded_review.append(0)
else:
encoded_review.append(vocab_to_int[word])
if len(encoded_review) == 0:
encoded_reviews.append([0])
else:
encoded_reviews.append(encoded_review)
reviews_len = [len(x) for x in encoded_reviews]
pd.Series(reviews_len).hist()
plt.xlabel('Words')
plt.ylabel('Count')
plt.show()
# stats about vocabulary
#print('Unique words: ', len((vocab_to_int)))
#print()
# print tokens in first review
#print('Tokenized review: \n', encoded_reviews[:1])
def pad_features(reviews_ints, seq_length):
''' Return features of review_ints, where each review is padded with 0's
or truncated to the input seq_length.
'''
## getting the correct rows x cols shape
features = np.zeros((len(encoded_reviews), seq_length), dtype=int)
## for each review, I grab that review
for i, row in enumerate(encoded_reviews):
features[i, -len(row):] = np.array(row)[:seq_length]
return features
seq_length = 200
features = pad_features(encoded_reviews, seq_length=seq_length)
## test statements - do not change - ##
assert len(features)==len(encoded_reviews), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."
# print first 10 values of the first 30 batches
#print(features[:30,:10])
train_x, test_x, train_y, test_y = model_selection.train_test_split(features,label, test_size=0.2, random_state=42)
## print out the shapes of your resultant feature data
print("\t\t\tFeatures Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
#train_x=np.array(train_x).astype('float')
#train_y=np.array(train_x).astype('float')
#test_x=np.array(train_x).astype('float')
#test_y=np.array(train_x).astype('float')
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
# dataloaders
batch_size = 128
# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
dataiter = iter(train_loader)
sample_x, sample_y = dataiter.next()
# print('Sample input size: ', sample_x.size()) # batch_size, seq_length
# print('Sample input: \n', sample_x)
# print()
# print('Sample label size: ', sample_y.size()) # batch_size
# print('Sample label: \n', sample_y)
# First checking if GPU is available
train_on_gpu=torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU.')
else:
print('No GPU available, training on CPU.')
import torch.nn as nn
class SentimentRNN(nn.Module):
"""
The RNN model that will be used to perform Sentiment analysis.
"""
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
"""
Initialize the model by setting up the layers.
"""
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=drop_prob, batch_first=True)
# dropout layer
self.dropout = nn.Dropout(0.3)
# linear and sigmoid layer
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid()
def | (self, x, hidden):
"""
Perform a forward pass of our model on some input and hidden state.
"""
batch_size = x.size(0)
# embeddings and lstm_out
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
# stack up lstm outputs
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
# return last sigmoid output and hidden state
return sig_out, hidden
def init_hidden(self, batch_size, train_on_gpu):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x hidden_dim,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if(train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int) + 1 # +1 for zero padding + our word tokens
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
# loss and optimization functions
lr=0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 4
counter = 0
print_every = 100
clip=5 # gradient clipping
train_on_gpu = True
# move model to GPU, if available
if(train_on_gpu):
net.cuda()
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter = 0
# batch loop
for inputs, labels in train_loader:
counter += 1
#print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
inputs = inputs.type(torch.LongTensor)
labels = labels.type(torch.LongTensor)
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()
# Get test data loss and accuracy
# = [] # track loss
num_correct = 0
# init hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter=0
net.eval()
all_prediction = []
# iterate over test data
for inputs, labels in test_loader:
counter += 1
print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
print(inputs.shape)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# get predicted outputs
output, h = net(inputs, h)
# convert output probabilities to predicted class (0 or 1)
pred = torch.round(output.squeeze()) # rounds to the nearest integer
# compare predictions to true label
correct_tensor = pred.eq(labels.float().view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
num_correct += np.sum(correct)
# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
| forward | identifier_name |
lstm.py |
import pandas as pd
from sklearn import model_selection, preprocessing, linear_model, metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
df=pd.read_csv('yelp_academic_dataset_review.csv',nrows=100000)
# df.head()
# print(df.shape)
df_filtered=df[df['stars'] !=3]
# print(df_filtered.shape)
#print(df_filtered.describe().T)
text=list(df_filtered['text'])
stars=list(df_filtered['stars'])
print(type(text))
label=[]
for item in stars:
if item>= 4:
y=1
else:
y=0
label.append(y)
label=np.array(label)
#we can get punctuation from string library
from string import punctuation
print(punctuation)
all_reviews=[]
for item in text:
item = item.lower()
item = "".join([ch for ch in item if ch not in punctuation])
all_reviews.append(item)
all_text = " ".join(all_reviews)
print(all_text[0:20])
all_words = all_text.split()
print(all_words[0:10])
from collections import Counter
# Count all the words using Counter Method
count_words = Counter(all_words)
total_words=len(all_words)
sorted_words=count_words.most_common(total_words)
#print(sorted_words[:30])
vocab_to_int={w:i+1 for i,(w,c) in enumerate(sorted_words)}
#print(vocab_to_int)
#reviews_ints = []
#for review in all_words:
#reviews_ints.append([vocab_to_int[word] for word in all_words])
encoded_reviews=list()
for review in all_reviews:
encoded_review=list()
for word in review.split():
if word not in vocab_to_int.keys():
#if word is not available in vocab_to_int put 0 in that place
encoded_review.append(0)
else:
encoded_review.append(vocab_to_int[word])
if len(encoded_review) == 0:
encoded_reviews.append([0])
else:
encoded_reviews.append(encoded_review)
reviews_len = [len(x) for x in encoded_reviews]
pd.Series(reviews_len).hist()
plt.xlabel('Words')
plt.ylabel('Count')
plt.show()
# stats about vocabulary
#print('Unique words: ', len((vocab_to_int)))
#print()
# print tokens in first review
#print('Tokenized review: \n', encoded_reviews[:1])
def pad_features(reviews_ints, seq_length):
''' Return features of review_ints, where each review is padded with 0's
or truncated to the input seq_length.
'''
## getting the correct rows x cols shape
features = np.zeros((len(encoded_reviews), seq_length), dtype=int)
## for each review, I grab that review
for i, row in enumerate(encoded_reviews):
features[i, -len(row):] = np.array(row)[:seq_length]
return features
seq_length = 200
features = pad_features(encoded_reviews, seq_length=seq_length)
## test statements - do not change - ##
assert len(features)==len(encoded_reviews), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."
# print first 10 values of the first 30 batches
#print(features[:30,:10])
train_x, test_x, train_y, test_y = model_selection.train_test_split(features,label, test_size=0.2, random_state=42)
## print out the shapes of your resultant feature data
print("\t\t\tFeatures Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
#train_x=np.array(train_x).astype('float')
#train_y=np.array(train_x).astype('float')
#test_x=np.array(train_x).astype('float')
#test_y=np.array(train_x).astype('float')
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
# dataloaders
batch_size = 128
# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
dataiter = iter(train_loader)
sample_x, sample_y = dataiter.next()
# print('Sample input size: ', sample_x.size()) # batch_size, seq_length
# print('Sample input: \n', sample_x)
# print()
# print('Sample label size: ', sample_y.size()) # batch_size
# print('Sample label: \n', sample_y)
# First checking if GPU is available
train_on_gpu=torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU.')
else:
print('No GPU available, training on CPU.')
import torch.nn as nn
class SentimentRNN(nn.Module):
"""
The RNN model that will be used to perform Sentiment analysis.
"""
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
"""
Initialize the model by setting up the layers.
"""
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=drop_prob, batch_first=True)
# dropout layer
self.dropout = nn.Dropout(0.3)
# linear and sigmoid layer
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid()
def forward(self, x, hidden):
"""
Perform a forward pass of our model on some input and hidden state.
"""
batch_size = x.size(0)
# embeddings and lstm_out
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
# stack up lstm outputs
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
# return last sigmoid output and hidden state
return sig_out, hidden
def init_hidden(self, batch_size, train_on_gpu):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x hidden_dim,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if(train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int) + 1 # +1 for zero padding + our word tokens
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
# loss and optimization functions
lr=0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 4
counter = 0
print_every = 100
clip=5 # gradient clipping
train_on_gpu = True
# move model to GPU, if available
if(train_on_gpu):
net.cuda()
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
|
# Get test data loss and accuracy
# = [] # track loss
num_correct = 0
# init hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter=0
net.eval()
all_prediction = []
# iterate over test data
for inputs, labels in test_loader:
counter += 1
print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
print(inputs.shape)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# get predicted outputs
output, h = net(inputs, h)
# convert output probabilities to predicted class (0 or 1)
pred = torch.round(output.squeeze()) # rounds to the nearest integer
# compare predictions to true label
correct_tensor = pred.eq(labels.float().view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
num_correct += np.sum(correct)
# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
| h = net.init_hidden(batch_size, train_on_gpu)
counter = 0
# batch loop
for inputs, labels in train_loader:
counter += 1
#print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
inputs = inputs.type(torch.LongTensor)
labels = labels.type(torch.LongTensor)
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step() | conditional_block |
lstm.py |
import pandas as pd
from sklearn import model_selection, preprocessing, linear_model, metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
df=pd.read_csv('yelp_academic_dataset_review.csv',nrows=100000)
# df.head()
# print(df.shape)
df_filtered=df[df['stars'] !=3]
# print(df_filtered.shape)
#print(df_filtered.describe().T)
text=list(df_filtered['text'])
stars=list(df_filtered['stars'])
print(type(text))
label=[]
for item in stars:
if item>= 4:
y=1
else:
y=0
label.append(y)
label=np.array(label)
#we can get punctuation from string library
from string import punctuation
print(punctuation)
all_reviews=[]
for item in text:
item = item.lower()
item = "".join([ch for ch in item if ch not in punctuation])
all_reviews.append(item)
all_text = " ".join(all_reviews)
print(all_text[0:20])
all_words = all_text.split()
print(all_words[0:10])
from collections import Counter
# Count all the words using Counter Method
count_words = Counter(all_words)
total_words=len(all_words)
sorted_words=count_words.most_common(total_words)
#print(sorted_words[:30])
vocab_to_int={w:i+1 for i,(w,c) in enumerate(sorted_words)}
#print(vocab_to_int)
#reviews_ints = []
#for review in all_words:
#reviews_ints.append([vocab_to_int[word] for word in all_words])
encoded_reviews=list()
for review in all_reviews:
encoded_review=list()
for word in review.split():
if word not in vocab_to_int.keys():
#if word is not available in vocab_to_int put 0 in that place
encoded_review.append(0)
else:
encoded_review.append(vocab_to_int[word])
if len(encoded_review) == 0:
encoded_reviews.append([0])
else:
encoded_reviews.append(encoded_review)
reviews_len = [len(x) for x in encoded_reviews]
pd.Series(reviews_len).hist()
plt.xlabel('Words')
plt.ylabel('Count')
plt.show()
# stats about vocabulary
#print('Unique words: ', len((vocab_to_int)))
#print()
# print tokens in first review
#print('Tokenized review: \n', encoded_reviews[:1])
def pad_features(reviews_ints, seq_length):
''' Return features of review_ints, where each review is padded with 0's
or truncated to the input seq_length.
'''
## getting the correct rows x cols shape
features = np.zeros((len(encoded_reviews), seq_length), dtype=int)
## for each review, I grab that review
for i, row in enumerate(encoded_reviews):
features[i, -len(row):] = np.array(row)[:seq_length]
return features
seq_length = 200
features = pad_features(encoded_reviews, seq_length=seq_length)
## test statements - do not change - ##
assert len(features)==len(encoded_reviews), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."
# print first 10 values of the first 30 batches
#print(features[:30,:10])
train_x, test_x, train_y, test_y = model_selection.train_test_split(features,label, test_size=0.2, random_state=42)
## print out the shapes of your resultant feature data
print("\t\t\tFeatures Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
#train_x=np.array(train_x).astype('float')
#train_y=np.array(train_x).astype('float')
#test_x=np.array(train_x).astype('float')
#test_y=np.array(train_x).astype('float')
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
# dataloaders
batch_size = 128
# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
dataiter = iter(train_loader)
sample_x, sample_y = dataiter.next()
# print('Sample input size: ', sample_x.size()) # batch_size, seq_length
# print('Sample input: \n', sample_x)
# print()
# print('Sample label size: ', sample_y.size()) # batch_size
# print('Sample label: \n', sample_y)
# First checking if GPU is available
train_on_gpu=torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU.')
else:
print('No GPU available, training on CPU.')
import torch.nn as nn
class SentimentRNN(nn.Module):
"""
The RNN model that will be used to perform Sentiment analysis.
"""
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
|
def forward(self, x, hidden):
"""
Perform a forward pass of our model on some input and hidden state.
"""
batch_size = x.size(0)
# embeddings and lstm_out
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
# stack up lstm outputs
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
# return last sigmoid output and hidden state
return sig_out, hidden
def init_hidden(self, batch_size, train_on_gpu):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x hidden_dim,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if(train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int) + 1 # +1 for zero padding + our word tokens
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
# loss and optimization functions
lr=0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 4
counter = 0
print_every = 100
clip=5 # gradient clipping
train_on_gpu = True
# move model to GPU, if available
if(train_on_gpu):
net.cuda()
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter = 0
# batch loop
for inputs, labels in train_loader:
counter += 1
#print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
inputs = inputs.type(torch.LongTensor)
labels = labels.type(torch.LongTensor)
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()
# Get test data loss and accuracy
# = [] # track loss
num_correct = 0
# init hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter=0
net.eval()
all_prediction = []
# iterate over test data
for inputs, labels in test_loader:
counter += 1
print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
print(inputs.shape)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# get predicted outputs
output, h = net(inputs, h)
# convert output probabilities to predicted class (0 or 1)
pred = torch.round(output.squeeze()) # rounds to the nearest integer
# compare predictions to true label
correct_tensor = pred.eq(labels.float().view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
num_correct += np.sum(correct)
# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc))
| """
Initialize the model by setting up the layers.
"""
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=drop_prob, batch_first=True)
# dropout layer
self.dropout = nn.Dropout(0.3)
# linear and sigmoid layer
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid() | identifier_body |
lstm.py | import pandas as pd
from sklearn import model_selection, preprocessing, linear_model, metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix, recall_score, precision_score, accuracy_score
df=pd.read_csv('yelp_academic_dataset_review.csv',nrows=100000)
# df.head()
# print(df.shape)
df_filtered=df[df['stars'] !=3]
# print(df_filtered.shape)
#print(df_filtered.describe().T)
| print(type(text))
label=[]
for item in stars:
if item>= 4:
y=1
else:
y=0
label.append(y)
label=np.array(label)
#we can get punctuation from string library
from string import punctuation
print(punctuation)
all_reviews=[]
for item in text:
item = item.lower()
item = "".join([ch for ch in item if ch not in punctuation])
all_reviews.append(item)
all_text = " ".join(all_reviews)
print(all_text[0:20])
all_words = all_text.split()
print(all_words[0:10])
from collections import Counter
# Count all the words using Counter Method
count_words = Counter(all_words)
total_words=len(all_words)
sorted_words=count_words.most_common(total_words)
#print(sorted_words[:30])
vocab_to_int={w:i+1 for i,(w,c) in enumerate(sorted_words)}
#print(vocab_to_int)
#reviews_ints = []
#for review in all_words:
#reviews_ints.append([vocab_to_int[word] for word in all_words])
encoded_reviews=list()
for review in all_reviews:
encoded_review=list()
for word in review.split():
if word not in vocab_to_int.keys():
#if word is not available in vocab_to_int put 0 in that place
encoded_review.append(0)
else:
encoded_review.append(vocab_to_int[word])
if len(encoded_review) == 0:
encoded_reviews.append([0])
else:
encoded_reviews.append(encoded_review)
reviews_len = [len(x) for x in encoded_reviews]
pd.Series(reviews_len).hist()
plt.xlabel('Words')
plt.ylabel('Count')
plt.show()
# stats about vocabulary
#print('Unique words: ', len((vocab_to_int)))
#print()
# print tokens in first review
#print('Tokenized review: \n', encoded_reviews[:1])
def pad_features(reviews_ints, seq_length):
''' Return features of review_ints, where each review is padded with 0's
or truncated to the input seq_length.
'''
## getting the correct rows x cols shape
features = np.zeros((len(encoded_reviews), seq_length), dtype=int)
## for each review, I grab that review
for i, row in enumerate(encoded_reviews):
features[i, -len(row):] = np.array(row)[:seq_length]
return features
seq_length = 200
features = pad_features(encoded_reviews, seq_length=seq_length)
## test statements - do not change - ##
assert len(features)==len(encoded_reviews), "Your features should have as many rows as reviews."
assert len(features[0])==seq_length, "Each feature row should contain seq_length values."
# print first 10 values of the first 30 batches
#print(features[:30,:10])
train_x, test_x, train_y, test_y = model_selection.train_test_split(features,label, test_size=0.2, random_state=42)
## print out the shapes of your resultant feature data
print("\t\t\tFeatures Shapes:")
print("Train set: \t\t{}".format(train_x.shape),
"\nTest set: \t\t{}".format(test_x.shape))
#train_x=np.array(train_x).astype('float')
#train_y=np.array(train_x).astype('float')
#test_x=np.array(train_x).astype('float')
#test_y=np.array(train_x).astype('float')
import torch
from torch.utils.data import TensorDataset, DataLoader
# create Tensor datasets
train_data = TensorDataset(torch.from_numpy(train_x), torch.from_numpy(train_y))
test_data = TensorDataset(torch.from_numpy(test_x), torch.from_numpy(test_y))
# dataloaders
batch_size = 128
# make sure to SHUFFLE your data
train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)
test_loader = DataLoader(test_data, shuffle=True, batch_size=batch_size)
dataiter = iter(train_loader)
sample_x, sample_y = dataiter.next()
# print('Sample input size: ', sample_x.size()) # batch_size, seq_length
# print('Sample input: \n', sample_x)
# print()
# print('Sample label size: ', sample_y.size()) # batch_size
# print('Sample label: \n', sample_y)
# First checking if GPU is available
train_on_gpu=torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU.')
else:
print('No GPU available, training on CPU.')
import torch.nn as nn
class SentimentRNN(nn.Module):
"""
The RNN model that will be used to perform Sentiment analysis.
"""
def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
"""
Initialize the model by setting up the layers.
"""
super(SentimentRNN, self).__init__()
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
# embedding and LSTM layers
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
dropout=drop_prob, batch_first=True)
# dropout layer
self.dropout = nn.Dropout(0.3)
# linear and sigmoid layer
self.fc = nn.Linear(hidden_dim, output_size)
self.sig = nn.Sigmoid()
def forward(self, x, hidden):
"""
Perform a forward pass of our model on some input and hidden state.
"""
batch_size = x.size(0)
# embeddings and lstm_out
embeds = self.embedding(x)
lstm_out, hidden = self.lstm(embeds, hidden)
# stack up lstm outputs
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
# dropout and fully connected layer
out = self.dropout(lstm_out)
out = self.fc(out)
# sigmoid function
sig_out = self.sig(out)
# reshape to be batch_size first
sig_out = sig_out.view(batch_size, -1)
sig_out = sig_out[:, -1] # get last batch of labels
# return last sigmoid output and hidden state
return sig_out, hidden
def init_hidden(self, batch_size, train_on_gpu):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x hidden_dim,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if(train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
return hidden
# Instantiate the model w/ hyperparams
vocab_size = len(vocab_to_int) + 1 # +1 for zero padding + our word tokens
output_size = 1
embedding_dim = 400
hidden_dim = 256
n_layers = 2
net = SentimentRNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)
print(net)
# loss and optimization functions
lr=0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 4
counter = 0
print_every = 100
clip=5 # gradient clipping
train_on_gpu = True
# move model to GPU, if available
if(train_on_gpu):
net.cuda()
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter = 0
# batch loop
for inputs, labels in train_loader:
counter += 1
#print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
inputs = inputs.type(torch.LongTensor)
labels = labels.type(torch.LongTensor)
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()
# Get test data loss and accuracy
# = [] # track loss
num_correct = 0
# init hidden state
h = net.init_hidden(batch_size, train_on_gpu)
counter=0
net.eval()
all_prediction = []
# iterate over test data
for inputs, labels in test_loader:
counter += 1
print('epoce: {e}, batch: {b}'.format(e=e, b=counter))
if (labels.shape[0] != batch_size):
continue
print(inputs.shape)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
if(train_on_gpu):
inputs, labels = inputs.cuda(), labels.cuda()
# get predicted outputs
output, h = net(inputs, h)
# convert output probabilities to predicted class (0 or 1)
pred = torch.round(output.squeeze()) # rounds to the nearest integer
# compare predictions to true label
correct_tensor = pred.eq(labels.float().view_as(pred))
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
num_correct += np.sum(correct)
# accuracy over all test data
test_acc = num_correct/len(test_loader.dataset)
print("Test accuracy: {:.3f}".format(test_acc)) | text=list(df_filtered['text'])
stars=list(df_filtered['stars'])
| random_line_split |
Master Solution.py | #!/usr/bin/env python
# coding: utf-8
# # Background
#
# - 1 The adjacent 3 tabs contain a data dump of search strings used by EXP clients to access relevant content available on Gartner.com for the months of August, September and October in the year 2018. Every row mentions if the EXP client is "Premium" or not, Persona (that was used for data extraction for EXP clients from main database), day on which the search string was used and finally the search string. In total there are 68544 rows of data available across all the months.
# ## Task 1
#
# - 2 Clean the dataset using standard text cleaning steps and process the data to allow for the following analysis.
# - 3 Identify the most popular topics being searched for by EXP clients and plot the top 10 topics by their frequency of occurrence.
#
# - 4 Report on the volume growth of these topics over August, September and October.
# ## Task 2
#
# - 5 Used the cleaned dataset from Step 2 and process your dataset for the following analysis.
# - 6 Use the concept of Named Entity Recognition in your code to identify a list of geographies and organizations being mentioned in the search terms.
#
# - 7 Plot the geographies and organizations by their frequency of occurrence (count 1 mention of a geography, if the same geography is mentioned more than once in the same search string). If you can do it for the mention of "technologies" such as ai, analytics etc. then it will be a plus. Any useful trends observed in these mentions of geographies, organizations and technologies should be plotted and presented.
#
# # Final Output & Next Steps
#
# - 8 "Final output of the exercise should include
#
# *1. 3 Code files- 1 used for data cleaning and 2 used for each of the 2 tasks (with data processing and data analysis). Please comment your code appropriately. You will be evaluated for properly structuring your code and for building checks and balances in your analysis- which should be included in your code as well.*
#
# *2. If some data visualization tool such as Tableau/PowerBI is used for presentation of the plots in the panel round (if selected) then it will be considered a plus for the candidate. PPT presentation is acceptable though. The following visualizations are required- *
#
# **- Please prepare 1-2 slides to explain your data cleaning and processing steps, 1-2 slides to display the results of Task 1 (include the methodology used for completing the task), 1-2 slides to display the result of Task 2 (include the methodology used for completing the task), 1-2 slides on what other analysis is possible on the data set including the expected insights from those (for this you will need to mention the preferred methodology for text analysis). "**
# - 9 You will be given a time limit of 3 Days from the time this test is given, to prepare the output. The candidates should upload the output docs- Dashboard/PPT & their 3 code files in a G-drive link and send them across to the assigned recruiter.
# - 10 If your output gets selected, you will be asked to present your findings & approach to our panel of experts who would cross question you on your analysis.
#
# In[26]:
import numpy as np
import pandas as pd
#for text processing
import re
import string
import nltk
from nltk.corpus import stopwords
from textblob import Word
#calculation of time
from time import time
##pretty print
import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
# Build LDA model
from gensim.models.ldamulticore import LdaMulticore
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# spacy
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', -1)
# #### Merge all 3 sheets
# In[2]:
# from pandas import ExcelWriter
# from pandas import ExcelFile
xls = pd.ExcelFile('data.xlsx')
df1 = pd.read_excel(xls, sheet_name='Aug')
df2 = pd.read_excel(xls, sheet_name='Sept')
df3 = pd.read_excel(xls, sheet_name='Oct')
# In[3]:
df = pd.concat([df1,df2,df3] , ignore_index=True)
# ## Inspect Text field
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.isnull().sum()
# In[7]:
#fetch missing values of a column
df[df["Query Text"].isnull()]
# In[8]:
#drop all the rows which have NaN in Query Text
df = df.dropna(how='any',axis=0)
# In[9]:
df.isnull().sum()
# In[10]:
df.drop_duplicates(subset ="Query Text",
keep = 'last', inplace = True)
# In[11]:
df.info()
# In[12]:
# check the length of documents
document_lengths = np.array(list(map(len, df['Query Text'].str.split(' '))))
print("The average number of words in a document is: {}.".format(np.mean(document_lengths)))
print("The minimum number of words in a document is: {}.".format(min(document_lengths)))
print("The maximum number of words in a document is: {}.".format(max(document_lengths)))
# In[13]:
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 1)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 2)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 3)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 4)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 5)))
# ## Task 1
#
# ### Sub-task 2 : Text pre-processing
# In[14]:
def | (data):
#convert text to lower-case
data['processed_text'] = data['Query Text'].apply(lambda x:' '.join(x.lower() for x in x.split()))
#remove punctuations, unwanted characters
data['processed_text_1']= data['processed_text'].apply(lambda x: "".join([char for char in x if char not in string.punctuation]))
#remove numbers
data['processed_text_2']= data['processed_text_1'].apply(lambda x: re.sub('[0-9]+', ' ' , x))
#remove stopwords
stop = stopwords.words('english')
data['processed_text_3']= data['processed_text_2'].apply(lambda x:' '.join(x for x in x.split() if not x in stop))
#lemmatization
data['processed_text_4']= data['processed_text_3'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
# remove all single characters
data['processed_text_5'] = data['processed_text_4'].apply(lambda x: re.sub(r'\s+[a-zA-Z]\s+', ' ', x))
#create a final text field to work on
data['final_text'] = data['processed_text_5']
# In[15]:
#pre-processing or cleaning data
text_preprocessing(df)
df.head()
# In[16]:
#create tokenized data for LDA
df['final_tokenized'] = list(map(nltk.word_tokenize, df.final_text))
df.head()
# ## LDA training
# In[17]:
# Create Dictionary
id2word = corpora.Dictionary(df['final_tokenized'])
texts = df['final_tokenized']
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# In[18]:
id2word[0]
# In[19]:
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# In[20]:
get_ipython().run_cell_magic('time', '', "\nnum_topics = 10\n\nlda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics, \n workers=3, #CPU cores\n random_state=100,\n chunksize=400, #Number of documents to be used in each training chunk.\n passes=40, #Number of passes through the corpus during training.\n alpha='asymmetric',\n per_word_topics=True)")
# In[27]:
# View the topics in LDA model
pp.pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# #### What is topic coeherence
#
# https://rare-technologies.com/what-is-topic-coherence/
#
# What exactly is this topic coherence pipeline thing? Why is it even important? Moreover, what is the advantage of having this pipeline at all? In this post I will look to answer those questions in an as non-technical language as possible. This is meant for the general reader as much as a technical one so I will try to engage your imaginations more and your maths skills less.
#
# Imagine that you get water from a lot of places. The way you test this water is by providing it to a lot of people and then taking their reviews. If most of the reviews are bad, you say the water is bad and vice-versa. So basically all your evaluations are based on reviews with ratings as bad or good. If someone asks you exactly how good (or bad) the water is, you blend in your personal opinion. But this doesn’t assign a particular number to the quality of water and thus is only a qualitative analysis. Hence it can’t be used to compare two different sources of water in a definitive manner.
#
# Since you are a lazy person and strive to assign a quantity to the quality, you install four different pipes at the end of the water source and design a meter which tells you the exact quality of water by assigning a number to it. While doing this you receive help from a lot of wonderful people around you and therefore you are successful in installing it. Hence now you don’t need to go and gather hundred different people to get their opinion on the quality of water. You can get it straight from the meter and this value is always in accordance with the human opinions.
#
# The water here is the topics from some topic modelling algorithm. Earlier, the topics coming out from these topic modelling algorithms used to be tested on their human interpretability by presenting them to humans and taking their input on them. This was not quantitative but only qualitative. The meter and the pipes combined (yes you guessed it right) is the topic coherence pipeline. The four pipes are:
#
# Segmentation : Where the water is partitioned into several glasses assuming that the quality of water in each glass is different.
# Probability Estimation : Where the quantity of water in each glass is measured.
# Confirmation Measure : Where the quality of water (according to a certain metric) in each glass is measured and a number is assigned to each glass wrt it’s quantity.
# Aggregation : The meter where these quality numbers are combined in a certain way (say arithmetic mean) to come up with one number.
# And there you have your topic coherence pipeline! There are surely much better analogies than this one but I hope you got the gist of it.
# In[28]:
get_ipython().run_cell_magic('time', '', "\n# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=df['final_tokenized'], dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)")
# ## Top 10 topics by frequency of occurence
#
#
# In[29]:
get_ipython().run_cell_magic('time', '', '\n# Visualize the topics\n\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis')
# #### How to find the optimal number of topics for LDA?
#
# My approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.
#
# Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.
#
# If you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.
#
# The compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.
# If the coherence score seems to keep increasing, it may make better sense to pick the model that gave the highest CV before flattening out. This is exactly the case here.
#
# So for further steps I will choose the model with 20 topics itself.
# ## Sub-Task2 Named Entity Recognition
# In[34]:
from IPython.display import Image
Image("img/picture.png")
# In[35]:
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
# In[36]:
#removing duplicates
final_text = df['final_text'].unique()
print('Number of Query Text: ', len(final_text))
# In[37]:
corpus = list(nlp.pipe(final_text))
# In[38]:
# Looking at number of times each ent appears in the total corpus
# nb. ents all appear as Spacy tokens, hence needing to cast as str
from collections import defaultdict
all_ents = defaultdict(int)
for i, doc in enumerate(corpus):
#print(i,doc)
for ent in doc.ents:
all_ents[str(ent)] += 1
#print(ent)
print('Number of distinct entities: ', len(all_ents))
# In[39]:
# labels = [x.label_ for x in corpus.ents]
# Counter(labels)
ent_label = []
ent_common = []
for i, doc in enumerate(corpus):
for ent in doc.ents:
ent_label.append(ent.label_)
ent_common.append(ent.text)
print("Unique labels for entities : ", Counter(ent_label))
print("Top 3 frequent tokens : ", Counter(ent_common).most_common(3))
# In[40]:
sentences = []
for i, doc in enumerate(corpus):
for ent in doc.sents:
sentences.append(ent)
print(sentences[0])
# In[41]:
# Most popular ents
import operator
sorted_ents = sorted(all_ents.items(), key=operator.itemgetter(1), reverse=True)
sorted_ents[:30]
# ### List of geographies and organizations being mentioned in the search terms.
# In[52]:
for i, doc in enumerate(corpus):
for ent in doc.ents:
if ent.label_ == 'ORG' or ent.label_ == 'GPE':
print(ent.text, ent.start_char, ent.end_char, ent.label_)
| text_preprocessing | identifier_name |
Master Solution.py | #!/usr/bin/env python
# coding: utf-8
# # Background
#
# - 1 The adjacent 3 tabs contain a data dump of search strings used by EXP clients to access relevant content available on Gartner.com for the months of August, September and October in the year 2018. Every row mentions if the EXP client is "Premium" or not, Persona (that was used for data extraction for EXP clients from main database), day on which the search string was used and finally the search string. In total there are 68544 rows of data available across all the months.
# ## Task 1
#
# - 2 Clean the dataset using standard text cleaning steps and process the data to allow for the following analysis.
# - 3 Identify the most popular topics being searched for by EXP clients and plot the top 10 topics by their frequency of occurrence.
#
# - 4 Report on the volume growth of these topics over August, September and October.
# ## Task 2
#
# - 5 Used the cleaned dataset from Step 2 and process your dataset for the following analysis.
# - 6 Use the concept of Named Entity Recognition in your code to identify a list of geographies and organizations being mentioned in the search terms.
#
# - 7 Plot the geographies and organizations by their frequency of occurrence (count 1 mention of a geography, if the same geography is mentioned more than once in the same search string). If you can do it for the mention of "technologies" such as ai, analytics etc. then it will be a plus. Any useful trends observed in these mentions of geographies, organizations and technologies should be plotted and presented.
#
# # Final Output & Next Steps
#
# - 8 "Final output of the exercise should include
#
# *1. 3 Code files- 1 used for data cleaning and 2 used for each of the 2 tasks (with data processing and data analysis). Please comment your code appropriately. You will be evaluated for properly structuring your code and for building checks and balances in your analysis- which should be included in your code as well.*
#
# *2. If some data visualization tool such as Tableau/PowerBI is used for presentation of the plots in the panel round (if selected) then it will be considered a plus for the candidate. PPT presentation is acceptable though. The following visualizations are required- *
#
# **- Please prepare 1-2 slides to explain your data cleaning and processing steps, 1-2 slides to display the results of Task 1 (include the methodology used for completing the task), 1-2 slides to display the result of Task 2 (include the methodology used for completing the task), 1-2 slides on what other analysis is possible on the data set including the expected insights from those (for this you will need to mention the preferred methodology for text analysis). "**
# - 9 You will be given a time limit of 3 Days from the time this test is given, to prepare the output. The candidates should upload the output docs- Dashboard/PPT & their 3 code files in a G-drive link and send them across to the assigned recruiter.
# - 10 If your output gets selected, you will be asked to present your findings & approach to our panel of experts who would cross question you on your analysis.
#
# In[26]:
import numpy as np
import pandas as pd
#for text processing
import re
import string
import nltk
from nltk.corpus import stopwords
from textblob import Word
#calculation of time
from time import time
##pretty print
import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
# Build LDA model
from gensim.models.ldamulticore import LdaMulticore
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# spacy
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', -1)
# #### Merge all 3 sheets
# In[2]:
# from pandas import ExcelWriter
# from pandas import ExcelFile
xls = pd.ExcelFile('data.xlsx')
df1 = pd.read_excel(xls, sheet_name='Aug')
df2 = pd.read_excel(xls, sheet_name='Sept')
df3 = pd.read_excel(xls, sheet_name='Oct')
# In[3]:
df = pd.concat([df1,df2,df3] , ignore_index=True)
# ## Inspect Text field
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.isnull().sum()
# In[7]:
#fetch missing values of a column
df[df["Query Text"].isnull()]
# In[8]:
#drop all the rows which have NaN in Query Text
df = df.dropna(how='any',axis=0)
|
# In[9]:
df.isnull().sum()
# In[10]:
df.drop_duplicates(subset ="Query Text",
keep = 'last', inplace = True)
# In[11]:
df.info()
# In[12]:
# check the length of documents
document_lengths = np.array(list(map(len, df['Query Text'].str.split(' '))))
print("The average number of words in a document is: {}.".format(np.mean(document_lengths)))
print("The minimum number of words in a document is: {}.".format(min(document_lengths)))
print("The maximum number of words in a document is: {}.".format(max(document_lengths)))
# In[13]:
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 1)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 2)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 3)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 4)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 5)))
# ## Task 1
#
# ### Sub-task 2 : Text pre-processing
# In[14]:
def text_preprocessing(data):
#convert text to lower-case
data['processed_text'] = data['Query Text'].apply(lambda x:' '.join(x.lower() for x in x.split()))
#remove punctuations, unwanted characters
data['processed_text_1']= data['processed_text'].apply(lambda x: "".join([char for char in x if char not in string.punctuation]))
#remove numbers
data['processed_text_2']= data['processed_text_1'].apply(lambda x: re.sub('[0-9]+', ' ' , x))
#remove stopwords
stop = stopwords.words('english')
data['processed_text_3']= data['processed_text_2'].apply(lambda x:' '.join(x for x in x.split() if not x in stop))
#lemmatization
data['processed_text_4']= data['processed_text_3'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
# remove all single characters
data['processed_text_5'] = data['processed_text_4'].apply(lambda x: re.sub(r'\s+[a-zA-Z]\s+', ' ', x))
#create a final text field to work on
data['final_text'] = data['processed_text_5']
# In[15]:
#pre-processing or cleaning data
text_preprocessing(df)
df.head()
# In[16]:
#create tokenized data for LDA
df['final_tokenized'] = list(map(nltk.word_tokenize, df.final_text))
df.head()
# ## LDA training
# In[17]:
# Create Dictionary
id2word = corpora.Dictionary(df['final_tokenized'])
texts = df['final_tokenized']
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# In[18]:
id2word[0]
# In[19]:
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# In[20]:
get_ipython().run_cell_magic('time', '', "\nnum_topics = 10\n\nlda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics, \n workers=3, #CPU cores\n random_state=100,\n chunksize=400, #Number of documents to be used in each training chunk.\n passes=40, #Number of passes through the corpus during training.\n alpha='asymmetric',\n per_word_topics=True)")
# In[27]:
# View the topics in LDA model
pp.pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# #### What is topic coeherence
#
# https://rare-technologies.com/what-is-topic-coherence/
#
# What exactly is this topic coherence pipeline thing? Why is it even important? Moreover, what is the advantage of having this pipeline at all? In this post I will look to answer those questions in an as non-technical language as possible. This is meant for the general reader as much as a technical one so I will try to engage your imaginations more and your maths skills less.
#
# Imagine that you get water from a lot of places. The way you test this water is by providing it to a lot of people and then taking their reviews. If most of the reviews are bad, you say the water is bad and vice-versa. So basically all your evaluations are based on reviews with ratings as bad or good. If someone asks you exactly how good (or bad) the water is, you blend in your personal opinion. But this doesn’t assign a particular number to the quality of water and thus is only a qualitative analysis. Hence it can’t be used to compare two different sources of water in a definitive manner.
#
# Since you are a lazy person and strive to assign a quantity to the quality, you install four different pipes at the end of the water source and design a meter which tells you the exact quality of water by assigning a number to it. While doing this you receive help from a lot of wonderful people around you and therefore you are successful in installing it. Hence now you don’t need to go and gather hundred different people to get their opinion on the quality of water. You can get it straight from the meter and this value is always in accordance with the human opinions.
#
# The water here is the topics from some topic modelling algorithm. Earlier, the topics coming out from these topic modelling algorithms used to be tested on their human interpretability by presenting them to humans and taking their input on them. This was not quantitative but only qualitative. The meter and the pipes combined (yes you guessed it right) is the topic coherence pipeline. The four pipes are:
#
# Segmentation : Where the water is partitioned into several glasses assuming that the quality of water in each glass is different.
# Probability Estimation : Where the quantity of water in each glass is measured.
# Confirmation Measure : Where the quality of water (according to a certain metric) in each glass is measured and a number is assigned to each glass wrt it’s quantity.
# Aggregation : The meter where these quality numbers are combined in a certain way (say arithmetic mean) to come up with one number.
# And there you have your topic coherence pipeline! There are surely much better analogies than this one but I hope you got the gist of it.
# In[28]:
get_ipython().run_cell_magic('time', '', "\n# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=df['final_tokenized'], dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)")
# ## Top 10 topics by frequency of occurence
#
#
# In[29]:
get_ipython().run_cell_magic('time', '', '\n# Visualize the topics\n\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis')
# #### How to find the optimal number of topics for LDA?
#
# My approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.
#
# Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.
#
# If you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.
#
# The compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.
# If the coherence score seems to keep increasing, it may make better sense to pick the model that gave the highest CV before flattening out. This is exactly the case here.
#
# So for further steps I will choose the model with 20 topics itself.
# ## Sub-Task2 Named Entity Recognition
# In[34]:
from IPython.display import Image
Image("img/picture.png")
# In[35]:
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
# In[36]:
#removing duplicates
final_text = df['final_text'].unique()
print('Number of Query Text: ', len(final_text))
# In[37]:
corpus = list(nlp.pipe(final_text))
# In[38]:
# Looking at number of times each ent appears in the total corpus
# nb. ents all appear as Spacy tokens, hence needing to cast as str
from collections import defaultdict
all_ents = defaultdict(int)
for i, doc in enumerate(corpus):
#print(i,doc)
for ent in doc.ents:
all_ents[str(ent)] += 1
#print(ent)
print('Number of distinct entities: ', len(all_ents))
# In[39]:
# labels = [x.label_ for x in corpus.ents]
# Counter(labels)
ent_label = []
ent_common = []
for i, doc in enumerate(corpus):
for ent in doc.ents:
ent_label.append(ent.label_)
ent_common.append(ent.text)
print("Unique labels for entities : ", Counter(ent_label))
print("Top 3 frequent tokens : ", Counter(ent_common).most_common(3))
# In[40]:
sentences = []
for i, doc in enumerate(corpus):
for ent in doc.sents:
sentences.append(ent)
print(sentences[0])
# In[41]:
# Most popular ents
import operator
sorted_ents = sorted(all_ents.items(), key=operator.itemgetter(1), reverse=True)
sorted_ents[:30]
# ### List of geographies and organizations being mentioned in the search terms.
# In[52]:
for i, doc in enumerate(corpus):
for ent in doc.ents:
if ent.label_ == 'ORG' or ent.label_ == 'GPE':
print(ent.text, ent.start_char, ent.end_char, ent.label_) | random_line_split | |
Master Solution.py | #!/usr/bin/env python
# coding: utf-8
# # Background
#
# - 1 The adjacent 3 tabs contain a data dump of search strings used by EXP clients to access relevant content available on Gartner.com for the months of August, September and October in the year 2018. Every row mentions if the EXP client is "Premium" or not, Persona (that was used for data extraction for EXP clients from main database), day on which the search string was used and finally the search string. In total there are 68544 rows of data available across all the months.
# ## Task 1
#
# - 2 Clean the dataset using standard text cleaning steps and process the data to allow for the following analysis.
# - 3 Identify the most popular topics being searched for by EXP clients and plot the top 10 topics by their frequency of occurrence.
#
# - 4 Report on the volume growth of these topics over August, September and October.
# ## Task 2
#
# - 5 Used the cleaned dataset from Step 2 and process your dataset for the following analysis.
# - 6 Use the concept of Named Entity Recognition in your code to identify a list of geographies and organizations being mentioned in the search terms.
#
# - 7 Plot the geographies and organizations by their frequency of occurrence (count 1 mention of a geography, if the same geography is mentioned more than once in the same search string). If you can do it for the mention of "technologies" such as ai, analytics etc. then it will be a plus. Any useful trends observed in these mentions of geographies, organizations and technologies should be plotted and presented.
#
# # Final Output & Next Steps
#
# - 8 "Final output of the exercise should include
#
# *1. 3 Code files- 1 used for data cleaning and 2 used for each of the 2 tasks (with data processing and data analysis). Please comment your code appropriately. You will be evaluated for properly structuring your code and for building checks and balances in your analysis- which should be included in your code as well.*
#
# *2. If some data visualization tool such as Tableau/PowerBI is used for presentation of the plots in the panel round (if selected) then it will be considered a plus for the candidate. PPT presentation is acceptable though. The following visualizations are required- *
#
# **- Please prepare 1-2 slides to explain your data cleaning and processing steps, 1-2 slides to display the results of Task 1 (include the methodology used for completing the task), 1-2 slides to display the result of Task 2 (include the methodology used for completing the task), 1-2 slides on what other analysis is possible on the data set including the expected insights from those (for this you will need to mention the preferred methodology for text analysis). "**
# - 9 You will be given a time limit of 3 Days from the time this test is given, to prepare the output. The candidates should upload the output docs- Dashboard/PPT & their 3 code files in a G-drive link and send them across to the assigned recruiter.
# - 10 If your output gets selected, you will be asked to present your findings & approach to our panel of experts who would cross question you on your analysis.
#
# In[26]:
import numpy as np
import pandas as pd
#for text processing
import re
import string
import nltk
from nltk.corpus import stopwords
from textblob import Word
#calculation of time
from time import time
##pretty print
import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
# Build LDA model
from gensim.models.ldamulticore import LdaMulticore
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# spacy
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', -1)
# #### Merge all 3 sheets
# In[2]:
# from pandas import ExcelWriter
# from pandas import ExcelFile
xls = pd.ExcelFile('data.xlsx')
df1 = pd.read_excel(xls, sheet_name='Aug')
df2 = pd.read_excel(xls, sheet_name='Sept')
df3 = pd.read_excel(xls, sheet_name='Oct')
# In[3]:
df = pd.concat([df1,df2,df3] , ignore_index=True)
# ## Inspect Text field
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.isnull().sum()
# In[7]:
#fetch missing values of a column
df[df["Query Text"].isnull()]
# In[8]:
#drop all the rows which have NaN in Query Text
df = df.dropna(how='any',axis=0)
# In[9]:
df.isnull().sum()
# In[10]:
df.drop_duplicates(subset ="Query Text",
keep = 'last', inplace = True)
# In[11]:
df.info()
# In[12]:
# check the length of documents
document_lengths = np.array(list(map(len, df['Query Text'].str.split(' '))))
print("The average number of words in a document is: {}.".format(np.mean(document_lengths)))
print("The minimum number of words in a document is: {}.".format(min(document_lengths)))
print("The maximum number of words in a document is: {}.".format(max(document_lengths)))
# In[13]:
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 1)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 2)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 3)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 4)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 5)))
# ## Task 1
#
# ### Sub-task 2 : Text pre-processing
# In[14]:
def text_preprocessing(data):
#convert text to lower-case
|
# In[15]:
#pre-processing or cleaning data
text_preprocessing(df)
df.head()
# In[16]:
#create tokenized data for LDA
df['final_tokenized'] = list(map(nltk.word_tokenize, df.final_text))
df.head()
# ## LDA training
# In[17]:
# Create Dictionary
id2word = corpora.Dictionary(df['final_tokenized'])
texts = df['final_tokenized']
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# In[18]:
id2word[0]
# In[19]:
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# In[20]:
get_ipython().run_cell_magic('time', '', "\nnum_topics = 10\n\nlda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics, \n workers=3, #CPU cores\n random_state=100,\n chunksize=400, #Number of documents to be used in each training chunk.\n passes=40, #Number of passes through the corpus during training.\n alpha='asymmetric',\n per_word_topics=True)")
# In[27]:
# View the topics in LDA model
pp.pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# #### What is topic coeherence
#
# https://rare-technologies.com/what-is-topic-coherence/
#
# What exactly is this topic coherence pipeline thing? Why is it even important? Moreover, what is the advantage of having this pipeline at all? In this post I will look to answer those questions in an as non-technical language as possible. This is meant for the general reader as much as a technical one so I will try to engage your imaginations more and your maths skills less.
#
# Imagine that you get water from a lot of places. The way you test this water is by providing it to a lot of people and then taking their reviews. If most of the reviews are bad, you say the water is bad and vice-versa. So basically all your evaluations are based on reviews with ratings as bad or good. If someone asks you exactly how good (or bad) the water is, you blend in your personal opinion. But this doesn’t assign a particular number to the quality of water and thus is only a qualitative analysis. Hence it can’t be used to compare two different sources of water in a definitive manner.
#
# Since you are a lazy person and strive to assign a quantity to the quality, you install four different pipes at the end of the water source and design a meter which tells you the exact quality of water by assigning a number to it. While doing this you receive help from a lot of wonderful people around you and therefore you are successful in installing it. Hence now you don’t need to go and gather hundred different people to get their opinion on the quality of water. You can get it straight from the meter and this value is always in accordance with the human opinions.
#
# The water here is the topics from some topic modelling algorithm. Earlier, the topics coming out from these topic modelling algorithms used to be tested on their human interpretability by presenting them to humans and taking their input on them. This was not quantitative but only qualitative. The meter and the pipes combined (yes you guessed it right) is the topic coherence pipeline. The four pipes are:
#
# Segmentation : Where the water is partitioned into several glasses assuming that the quality of water in each glass is different.
# Probability Estimation : Where the quantity of water in each glass is measured.
# Confirmation Measure : Where the quality of water (according to a certain metric) in each glass is measured and a number is assigned to each glass wrt it’s quantity.
# Aggregation : The meter where these quality numbers are combined in a certain way (say arithmetic mean) to come up with one number.
# And there you have your topic coherence pipeline! There are surely much better analogies than this one but I hope you got the gist of it.
# In[28]:
get_ipython().run_cell_magic('time', '', "\n# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=df['final_tokenized'], dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)")
# ## Top 10 topics by frequency of occurence
#
#
# In[29]:
get_ipython().run_cell_magic('time', '', '\n# Visualize the topics\n\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis')
# #### How to find the optimal number of topics for LDA?
#
# My approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.
#
# Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.
#
# If you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.
#
# The compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.
# If the coherence score seems to keep increasing, it may make better sense to pick the model that gave the highest CV before flattening out. This is exactly the case here.
#
# So for further steps I will choose the model with 20 topics itself.
# ## Sub-Task2 Named Entity Recognition
# In[34]:
from IPython.display import Image
Image("img/picture.png")
# In[35]:
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
# In[36]:
#removing duplicates
final_text = df['final_text'].unique()
print('Number of Query Text: ', len(final_text))
# In[37]:
corpus = list(nlp.pipe(final_text))
# In[38]:
# Looking at number of times each ent appears in the total corpus
# nb. ents all appear as Spacy tokens, hence needing to cast as str
from collections import defaultdict
all_ents = defaultdict(int)
for i, doc in enumerate(corpus):
#print(i,doc)
for ent in doc.ents:
all_ents[str(ent)] += 1
#print(ent)
print('Number of distinct entities: ', len(all_ents))
# In[39]:
# labels = [x.label_ for x in corpus.ents]
# Counter(labels)
ent_label = []
ent_common = []
for i, doc in enumerate(corpus):
for ent in doc.ents:
ent_label.append(ent.label_)
ent_common.append(ent.text)
print("Unique labels for entities : ", Counter(ent_label))
print("Top 3 frequent tokens : ", Counter(ent_common).most_common(3))
# In[40]:
sentences = []
for i, doc in enumerate(corpus):
for ent in doc.sents:
sentences.append(ent)
print(sentences[0])
# In[41]:
# Most popular ents
import operator
sorted_ents = sorted(all_ents.items(), key=operator.itemgetter(1), reverse=True)
sorted_ents[:30]
# ### List of geographies and organizations being mentioned in the search terms.
# In[52]:
for i, doc in enumerate(corpus):
for ent in doc.ents:
if ent.label_ == 'ORG' or ent.label_ == 'GPE':
print(ent.text, ent.start_char, ent.end_char, ent.label_)
| data['processed_text'] = data['Query Text'].apply(lambda x:' '.join(x.lower() for x in x.split()))
#remove punctuations, unwanted characters
data['processed_text_1']= data['processed_text'].apply(lambda x: "".join([char for char in x if char not in string.punctuation]))
#remove numbers
data['processed_text_2']= data['processed_text_1'].apply(lambda x: re.sub('[0-9]+', ' ' , x))
#remove stopwords
stop = stopwords.words('english')
data['processed_text_3']= data['processed_text_2'].apply(lambda x:' '.join(x for x in x.split() if not x in stop))
#lemmatization
data['processed_text_4']= data['processed_text_3'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
# remove all single characters
data['processed_text_5'] = data['processed_text_4'].apply(lambda x: re.sub(r'\s+[a-zA-Z]\s+', ' ', x))
#create a final text field to work on
data['final_text'] = data['processed_text_5'] | identifier_body |
Master Solution.py | #!/usr/bin/env python
# coding: utf-8
# # Background
#
# - 1 The adjacent 3 tabs contain a data dump of search strings used by EXP clients to access relevant content available on Gartner.com for the months of August, September and October in the year 2018. Every row mentions if the EXP client is "Premium" or not, Persona (that was used for data extraction for EXP clients from main database), day on which the search string was used and finally the search string. In total there are 68544 rows of data available across all the months.
# ## Task 1
#
# - 2 Clean the dataset using standard text cleaning steps and process the data to allow for the following analysis.
# - 3 Identify the most popular topics being searched for by EXP clients and plot the top 10 topics by their frequency of occurrence.
#
# - 4 Report on the volume growth of these topics over August, September and October.
# ## Task 2
#
# - 5 Used the cleaned dataset from Step 2 and process your dataset for the following analysis.
# - 6 Use the concept of Named Entity Recognition in your code to identify a list of geographies and organizations being mentioned in the search terms.
#
# - 7 Plot the geographies and organizations by their frequency of occurrence (count 1 mention of a geography, if the same geography is mentioned more than once in the same search string). If you can do it for the mention of "technologies" such as ai, analytics etc. then it will be a plus. Any useful trends observed in these mentions of geographies, organizations and technologies should be plotted and presented.
#
# # Final Output & Next Steps
#
# - 8 "Final output of the exercise should include
#
# *1. 3 Code files- 1 used for data cleaning and 2 used for each of the 2 tasks (with data processing and data analysis). Please comment your code appropriately. You will be evaluated for properly structuring your code and for building checks and balances in your analysis- which should be included in your code as well.*
#
# *2. If some data visualization tool such as Tableau/PowerBI is used for presentation of the plots in the panel round (if selected) then it will be considered a plus for the candidate. PPT presentation is acceptable though. The following visualizations are required- *
#
# **- Please prepare 1-2 slides to explain your data cleaning and processing steps, 1-2 slides to display the results of Task 1 (include the methodology used for completing the task), 1-2 slides to display the result of Task 2 (include the methodology used for completing the task), 1-2 slides on what other analysis is possible on the data set including the expected insights from those (for this you will need to mention the preferred methodology for text analysis). "**
# - 9 You will be given a time limit of 3 Days from the time this test is given, to prepare the output. The candidates should upload the output docs- Dashboard/PPT & their 3 code files in a G-drive link and send them across to the assigned recruiter.
# - 10 If your output gets selected, you will be asked to present your findings & approach to our panel of experts who would cross question you on your analysis.
#
# In[26]:
import numpy as np
import pandas as pd
#for text processing
import re
import string
import nltk
from nltk.corpus import stopwords
from textblob import Word
#calculation of time
from time import time
##pretty print
import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim.corpora import Dictionary
# Build LDA model
from gensim.models.ldamulticore import LdaMulticore
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# spacy
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('max_colwidth', -1)
# #### Merge all 3 sheets
# In[2]:
# from pandas import ExcelWriter
# from pandas import ExcelFile
xls = pd.ExcelFile('data.xlsx')
df1 = pd.read_excel(xls, sheet_name='Aug')
df2 = pd.read_excel(xls, sheet_name='Sept')
df3 = pd.read_excel(xls, sheet_name='Oct')
# In[3]:
df = pd.concat([df1,df2,df3] , ignore_index=True)
# ## Inspect Text field
# In[4]:
df.head()
# In[5]:
df.info()
# In[6]:
df.isnull().sum()
# In[7]:
#fetch missing values of a column
df[df["Query Text"].isnull()]
# In[8]:
#drop all the rows which have NaN in Query Text
df = df.dropna(how='any',axis=0)
# In[9]:
df.isnull().sum()
# In[10]:
df.drop_duplicates(subset ="Query Text",
keep = 'last', inplace = True)
# In[11]:
df.info()
# In[12]:
# check the length of documents
document_lengths = np.array(list(map(len, df['Query Text'].str.split(' '))))
print("The average number of words in a document is: {}.".format(np.mean(document_lengths)))
print("The minimum number of words in a document is: {}.".format(min(document_lengths)))
print("The maximum number of words in a document is: {}.".format(max(document_lengths)))
# In[13]:
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 1)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 2)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 3)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 4)))
print("There are {} documents with tops 5 words.".format(sum(document_lengths == 5)))
# ## Task 1
#
# ### Sub-task 2 : Text pre-processing
# In[14]:
def text_preprocessing(data):
#convert text to lower-case
data['processed_text'] = data['Query Text'].apply(lambda x:' '.join(x.lower() for x in x.split()))
#remove punctuations, unwanted characters
data['processed_text_1']= data['processed_text'].apply(lambda x: "".join([char for char in x if char not in string.punctuation]))
#remove numbers
data['processed_text_2']= data['processed_text_1'].apply(lambda x: re.sub('[0-9]+', ' ' , x))
#remove stopwords
stop = stopwords.words('english')
data['processed_text_3']= data['processed_text_2'].apply(lambda x:' '.join(x for x in x.split() if not x in stop))
#lemmatization
data['processed_text_4']= data['processed_text_3'].apply(lambda x: " ".join([Word(word).lemmatize() for word in x.split()]))
# remove all single characters
data['processed_text_5'] = data['processed_text_4'].apply(lambda x: re.sub(r'\s+[a-zA-Z]\s+', ' ', x))
#create a final text field to work on
data['final_text'] = data['processed_text_5']
# In[15]:
#pre-processing or cleaning data
text_preprocessing(df)
df.head()
# In[16]:
#create tokenized data for LDA
df['final_tokenized'] = list(map(nltk.word_tokenize, df.final_text))
df.head()
# ## LDA training
# In[17]:
# Create Dictionary
id2word = corpora.Dictionary(df['final_tokenized'])
texts = df['final_tokenized']
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# In[18]:
id2word[0]
# In[19]:
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# In[20]:
get_ipython().run_cell_magic('time', '', "\nnum_topics = 10\n\nlda_model = LdaMulticore(corpus=corpus,\n id2word=id2word,\n num_topics=num_topics, \n workers=3, #CPU cores\n random_state=100,\n chunksize=400, #Number of documents to be used in each training chunk.\n passes=40, #Number of passes through the corpus during training.\n alpha='asymmetric',\n per_word_topics=True)")
# In[27]:
# View the topics in LDA model
pp.pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# #### What is topic coeherence
#
# https://rare-technologies.com/what-is-topic-coherence/
#
# What exactly is this topic coherence pipeline thing? Why is it even important? Moreover, what is the advantage of having this pipeline at all? In this post I will look to answer those questions in an as non-technical language as possible. This is meant for the general reader as much as a technical one so I will try to engage your imaginations more and your maths skills less.
#
# Imagine that you get water from a lot of places. The way you test this water is by providing it to a lot of people and then taking their reviews. If most of the reviews are bad, you say the water is bad and vice-versa. So basically all your evaluations are based on reviews with ratings as bad or good. If someone asks you exactly how good (or bad) the water is, you blend in your personal opinion. But this doesn’t assign a particular number to the quality of water and thus is only a qualitative analysis. Hence it can’t be used to compare two different sources of water in a definitive manner.
#
# Since you are a lazy person and strive to assign a quantity to the quality, you install four different pipes at the end of the water source and design a meter which tells you the exact quality of water by assigning a number to it. While doing this you receive help from a lot of wonderful people around you and therefore you are successful in installing it. Hence now you don’t need to go and gather hundred different people to get their opinion on the quality of water. You can get it straight from the meter and this value is always in accordance with the human opinions.
#
# The water here is the topics from some topic modelling algorithm. Earlier, the topics coming out from these topic modelling algorithms used to be tested on their human interpretability by presenting them to humans and taking their input on them. This was not quantitative but only qualitative. The meter and the pipes combined (yes you guessed it right) is the topic coherence pipeline. The four pipes are:
#
# Segmentation : Where the water is partitioned into several glasses assuming that the quality of water in each glass is different.
# Probability Estimation : Where the quantity of water in each glass is measured.
# Confirmation Measure : Where the quality of water (according to a certain metric) in each glass is measured and a number is assigned to each glass wrt it’s quantity.
# Aggregation : The meter where these quality numbers are combined in a certain way (say arithmetic mean) to come up with one number.
# And there you have your topic coherence pipeline! There are surely much better analogies than this one but I hope you got the gist of it.
# In[28]:
get_ipython().run_cell_magic('time', '', "\n# Compute Perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.\n\n# Compute Coherence Score\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=df['final_tokenized'], dictionary=id2word, coherence='c_v')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence Score: ', coherence_lda)")
# ## Top 10 topics by frequency of occurence
#
#
# In[29]:
get_ipython().run_cell_magic('time', '', '\n# Visualize the topics\n\npyLDAvis.enable_notebook()\nvis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)\nvis')
# #### How to find the optimal number of topics for LDA?
#
# My approach to finding the optimal number of topics is to build many LDA models with different values of number of topics (k) and pick the one that gives the highest coherence value.
#
# Choosing a ‘k’ that marks the end of a rapid growth of topic coherence usually offers meaningful and interpretable topics. Picking an even higher value can sometimes provide more granular sub-topics.
#
# If you see the same keywords being repeated in multiple topics, it’s probably a sign that the ‘k’ is too large.
#
# The compute_coherence_values() (see below) trains multiple LDA models and provides the models and their corresponding coherence scores.
# If the coherence score seems to keep increasing, it may make better sense to pick the model that gave the highest CV before flattening out. This is exactly the case here.
#
# So for further steps I will choose the model with 20 topics itself.
# ## Sub-Task2 Named Entity Recognition
# In[34]:
from IPython.display import Image
Image("img/picture.png")
# In[35]:
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
# In[36]:
#removing duplicates
final_text = df['final_text'].unique()
print('Number of Query Text: ', len(final_text))
# In[37]:
corpus = list(nlp.pipe(final_text))
# In[38]:
# Looking at number of times each ent appears in the total corpus
# nb. ents all appear as Spacy tokens, hence needing to cast as str
from collections import defaultdict
all_ents = defaultdict(int)
for i, doc in enumerate(corpus):
#print(i,doc)
for ent in doc.ents:
all_ents[str(ent)] += 1
#print(ent)
print('Number of distinct entities: ', len(all_ents))
# In[39]:
# labels = [x.label_ for x in corpus.ents]
# Counter(labels)
ent_label = []
ent_common = []
for i, doc in enumerate(corpus):
for ent in doc.ents:
ent_label.append(e | nique labels for entities : ", Counter(ent_label))
print("Top 3 frequent tokens : ", Counter(ent_common).most_common(3))
# In[40]:
sentences = []
for i, doc in enumerate(corpus):
for ent in doc.sents:
sentences.append(ent)
print(sentences[0])
# In[41]:
# Most popular ents
import operator
sorted_ents = sorted(all_ents.items(), key=operator.itemgetter(1), reverse=True)
sorted_ents[:30]
# ### List of geographies and organizations being mentioned in the search terms.
# In[52]:
for i, doc in enumerate(corpus):
for ent in doc.ents:
if ent.label_ == 'ORG' or ent.label_ == 'GPE':
print(ent.text, ent.start_char, ent.end_char, ent.label_)
| nt.label_)
ent_common.append(ent.text)
print("U | conditional_block |
helpers.go | package en
import (
"bytes"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang-collections/collections/stack"
"golang.org/x/net/html"
)
// Tag string type that corresponds to the html tags
type Tag struct {
Tag string
Attrs map[string]string
}
const (
iTag string = "i"
bTag string = "b"
strongTag string = "strong"
scriptTag string = "script"
aTag string = "a"
)
type Coordinate struct {
Lat float64 `json:"lattitude"`
Lon float64 `json:"longtitude"`
OriginalString string `json:"name"`
}
type Coordinates []Coordinate
func (c Coordinate) String() (text string) {
text = fmt.Sprintf("%s (%f, %f)", c.OriginalString, c.Lat, c.Lon)
return
}
// Image stores data for the images in the text, e.g. URL to download the image,
// Filepath - path where file was downloaded
type Image struct {
URL string
Caption string
Filepath string
}
// Images - array of Image objects
type Images []Image
func extractCoordinates(text string, re *regexp.Regexp) (string, Coordinates) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
coords = Coordinates{}
)
if len(mr) > 0 {
for _, item := range mr {
lon, _ := strconv.ParseFloat(item[1], 64)
lat, _ := strconv.ParseFloat(item[2], 64)
if len(item) > 3 {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[3]})
} else {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[0]})
}
result = regexp.MustCompile(item[0]).ReplaceAllLiteralString(result, "#coords#")
}
}
return result, coords
}
// ExtractCoordinates extracts coordinates from the given text and returns the updated string
// with replaced coordinates and the list of coordinates
func ExtractCoordinates(text string) (string, Coordinates) {
var (
// <a href="geo:49.976136, 36.267256">49.976136, 36.267256</a>
geoHrefRe = regexp.MustCompile("<a.+?href=\"geo:(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})\">(.+?)</a>")
// <a href="https://www.google.com.ua/maps/@50.0363257,36.2120039,19z" target="blank">50.036435 36.211914</a>
hrefRe = regexp.MustCompile("<a.+?href=\"https?://.+?(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,}).*?\">(.+?)</a>")
// 49.976136, 36.267256
numbersRe = regexp.MustCompile("(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})")
res = text
coords = Coordinates{}
tmpCoords Coordinates
)
log.Print("[INFO] Extract coordinates from task text")
for _, re := range []*regexp.Regexp{geoHrefRe, hrefRe, numbersRe} {
res, tmpCoords = extractCoordinates(res, re)
coords = append(coords, tmpCoords...)
}
for _, coord := range coords {
res = strings.Replace(res, "#coords#", coord.OriginalString, 1)
}
if DEBUG {
log.Printf("[DEBUG] Found %d coordinates", len(coords))
}
return res, coords
}
func extractImages(text string, re *regexp.Regexp, caption string, start int) (string, Images) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
images = Images{}
)
if len(mr) > 0 {
for i, item := range mr {
images = append(images, Image{URL: item[1], Caption: fmt.Sprintf("%s #%d", caption, start+i)})
result = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(result, fmt.Sprintf("%s #%d", caption, start+i))
}
}
return result, images
}
// ExtractImages extracts images from the given text and returns the updated
// version of the text and the list of images
func ExtractImages(text string, caption string) (string, Images) {
var (
reImg = regexp.MustCompile("<img.+?src=\"\\s*(https?://.+?)\\s*\".*?>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(https?://.+?\\.(jpg|png|bmp))\\\\?\".*?>(.*?)</a>")
result = text
images = Images{}
tmpImages Images
)
//log.Printf("Before image replacing: %s", text)
log.Print("[INFO] Extract images from task text")
for _, re := range []*regexp.Regexp{reImg, reA} {
result, tmpImages = extractImages(result, re, caption, len(images)+1)
images = append(images, tmpImages...)
}
if DEBUG {
log.Printf("[DEBUG] Found %d images", len(images))
}
return result, images
}
// ReplaceHTMLTags finds all html tags and removes them. Some tags like bold, italic are replaed with
// makrkups for telegram
func ReplaceHTMLTags(text string) string {
var (
parser = html.NewTokenizer(strings.NewReader(text))
tagStack = stack.New()
textToTag = map[int]string{}
)
for {
node := parser.Next()
switch node {
case html.ErrorToken:
result := strings.Replace(textToTag[0], " ", " ", -1)
return result
case html.TextToken:
t := string(parser.Text())
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], t}, "")
case html.StartTagToken:
tagName, hasAttr := parser.TagName()
if string(tagName) == scriptTag {
// We can skip script tags, as they are invisible for the user, but we can indicate that there are
// scripts in the task. To skip tag, it is necessary to call Next() two times:
// 1) returns TextToken with the script body
// 2) returns EndTagToken for the closed script tag
// Usually script tag doesn't have any neste tags, so this aproach should work
log.Printf("[INFO] Skipping script tag")
parser.Next()
parser.Next()
continue
}
tag := Tag{Tag: string(tagName), Attrs: map[string]string{}}
if hasAttr {
for {
attr, val, moreAttr := parser.TagAttr()
if DEBUG {
log.Printf("[DEBUG] Found attr %s", attr)
}
tag.Attrs[string(attr)] = string(val)
if !moreAttr {
break
}
}
}
if DEBUG {
log.Printf("[DEBUG] Found tag %q", tag)
}
tagStack.Push(tag)
case html.EndTagToken:
var (
addText string
tagNo = tagStack.Len()
tag = tagStack.Pop()
closedTag, _ = parser.TagName()
)
if tag.(Tag).Tag != string(closedTag) {
log.Printf("[WARNING] Found closed tag %q but expected %q", closedTag, tag)
continue
}
if DEBUG {
log.Printf("[DEBUG] Found end of tag %q", closedTag)
}
switch tag.(Tag).Tag {
case iTag:
addText = fmt.Sprintf("_%s_", textToTag[tagNo])
case bTag, strongTag:
addText = fmt.Sprintf("*%s*", textToTag[tagNo])
case aTag:
// if strings.Compare(string(attr), "href") == 0 {
addText = fmt.Sprintf("[%s](%s)", textToTag[tagNo], tag.(Tag).Attrs["href"])
// }
default:
addText = textToTag[tagNo]
}
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], addText}, "")
delete(textToTag, tagNo)
}
}
}
// ReplaceCommonTags deprecated - should be removed!!!
func ReplaceCommonTags(text string) string {
log.Print("Replace html tags")
var (
reBr = regexp.MustCompile("<br\\s*/?>")
reHr = regexp.MustCompile("<hr.*?/?>")
reP = regexp.MustCompile("<p>([^ ]+?)</p>")
reBold = regexp.MustCompile("<b.*?/?>((?s:.*?))</b>")
reStrong = regexp.MustCompile("<strong.*?>(.*?)</strong>")
reItalic = regexp.MustCompile("<i>((?s:.+?))</i>")
reSpan = regexp.MustCompile("<span.*?>(.*?)</span>")
reCenter = regexp.MustCompile("<center>((?s:.*?))</center>")
reFont = regexp.MustCompile("<font.+?color\\s*=\\\\?[\"«]?#?(\\w+)\\\\?[\"»]?.*?>((?s:.*?))</font>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(.+?)\\\\?\".*?>(.+?)</a>")
res = text
)
res = strings.Replace(text, "_", "\\_", -1)
if mrBr := reBr.FindAllStringSubmatch(text, -1); len(mrBr) > 0 {
| if mrHr := reHr.FindAllStringSubmatch(res, -1); len(mrHr) > 0 {
for _, item := range mrHr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrP := reP.FindAllStringSubmatch(res, -1); len(mrP) > 0 {
for _, item := range mrP {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("\n%s", item[1]))
}
}
if mrFont := reFont.FindAllStringSubmatch(res, -1); len(mrFont) > 0 {
for _, item := range mrFont {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("%s", item[2]))
//ReplaceAllLiteral(res, []byte(fmt.Sprintf("#%s#%s#", item[1], item[2])))
}
}
if mrBold := reBold.FindAllStringSubmatch(res, -1); len(mrBold) > 0 {
for _, item := range mrBold {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrStrong := reStrong.FindAllStringSubmatch(res, -1); len(mrStrong) > 0 {
for _, item := range mrStrong {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrItalic := reItalic.FindAllStringSubmatch(res, -1); len(mrItalic) > 0 {
for _, item := range mrItalic {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("_%s_", item[1]))
}
}
if mrSpan := reSpan.FindAllStringSubmatch(res, -1); len(mrSpan) > 0 {
for _, item := range mrSpan {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrCenter := reCenter.FindAllStringSubmatch(res, -1); len(mrCenter) > 0 {
for _, item := range mrCenter {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrA := reA.FindAllStringSubmatch(res, -1); len(mrA) > 0 {
for _, item := range mrA {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("[%s](%s)", item[2], item[1]))
}
}
res = strings.Replace(res, " ", " ", -1)
res = regexp.MustCompile("</?p>").ReplaceAllLiteralString(res, "")
return string(res)
}
func BlockTypeToString(typeId int8) string {
if typeId == 0 || typeId == 1 {
return "Игрок"
}
return "Команда"
}
func PrettyTimePrint(d time.Duration, nominative bool) (res *bytes.Buffer) {
var s string
res = bytes.NewBufferString(s)
//defer res.Close()
if (d / 3600) > 0 {
//res.WriteString(fmt.Sprintf("%d часов ", d/3600))
switch d / 3600 {
case 1, 21, 31, 41, 51:
res.WriteString(fmt.Sprintf("%d час ", d/3600))
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d часа ", d/3600))
default:
res.WriteString(fmt.Sprintf("%d часов ", d/3600))
}
}
if (d/60)%60 > 0 {
switch (d / 60) % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d минута ", (d/60)%60))
} else {
res.WriteString(fmt.Sprintf("%d минуту ", (d/60)%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d минуты ", (d/60)%60))
default:
res.WriteString(fmt.Sprintf("%d минут ", (d/60)%60))
}
}
if d%60 > 0 {
switch d % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d секунда", d%60))
} else {
res.WriteString(fmt.Sprintf("%d секунду", d%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d секунды", d%60))
default:
res.WriteString(fmt.Sprintf("%d секунд", d%60))
}
}
return
}
| for _, item := range mrBr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
| conditional_block |
helpers.go | package en
import (
"bytes"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang-collections/collections/stack"
"golang.org/x/net/html"
)
// Tag string type that corresponds to the html tags
type Tag struct {
Tag string
Attrs map[string]string
}
const (
iTag string = "i"
bTag string = "b"
strongTag string = "strong"
scriptTag string = "script"
aTag string = "a"
)
type Coordinate struct {
Lat float64 `json:"lattitude"`
Lon float64 `json:"longtitude"`
OriginalString string `json:"name"`
}
type Coordinates []Coordinate
func (c Coordinate) String() (text string) {
text = fmt.Sprintf("%s (%f, %f)", c.OriginalString, c.Lat, c.Lon)
return
}
// Image stores data for the images in the text, e.g. URL to download the image,
// Filepath - path where file was downloaded
type Image struct {
URL string
Caption string
Filepath string
}
// Images - array of Image objects
type Images []Image
func extractCoordinates(text string, re *regexp.Regexp) (string, Coordinates) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
coords = Coordinates{}
)
if len(mr) > 0 {
for _, item := range mr {
lon, _ := strconv.ParseFloat(item[1], 64)
lat, _ := strconv.ParseFloat(item[2], 64)
if len(item) > 3 {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[3]})
} else {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[0]})
}
result = regexp.MustCompile(item[0]).ReplaceAllLiteralString(result, "#coords#")
}
}
return result, coords
}
// ExtractCoordinates extracts coordinates from the given text and returns the updated string
// with replaced coordinates and the list of coordinates
func ExtractCoordinates(text string) (string, Coordinates) {
var (
// <a href="geo:49.976136, 36.267256">49.976136, 36.267256</a>
geoHrefRe = regexp.MustCompile("<a.+?href=\"geo:(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})\">(.+?)</a>")
// <a href="https://www.google.com.ua/maps/@50.0363257,36.2120039,19z" target="blank">50.036435 36.211914</a>
hrefRe = regexp.MustCompile("<a.+?href=\"https?://.+?(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,}).*?\">(.+?)</a>")
// 49.976136, 36.267256
numbersRe = regexp.MustCompile("(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})")
res = text
coords = Coordinates{}
tmpCoords Coordinates
)
log.Print("[INFO] Extract coordinates from task text")
for _, re := range []*regexp.Regexp{geoHrefRe, hrefRe, numbersRe} {
res, tmpCoords = extractCoordinates(res, re)
coords = append(coords, tmpCoords...)
}
for _, coord := range coords {
res = strings.Replace(res, "#coords#", coord.OriginalString, 1)
}
if DEBUG {
log.Printf("[DEBUG] Found %d coordinates", len(coords))
}
return res, coords
}
func extractImages(text string, re *regexp.Regexp, caption string, start int) (string, Images) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
images = Images{}
)
if len(mr) > 0 {
for i, item := range mr {
images = append(images, Image{URL: item[1], Caption: fmt.Sprintf("%s #%d", caption, start+i)})
result = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(result, fmt.Sprintf("%s #%d", caption, start+i))
}
}
return result, images
}
// ExtractImages extracts images from the given text and returns the updated
// version of the text and the list of images
func ExtractImages(text string, caption string) (string, Images) {
var (
reImg = regexp.MustCompile("<img.+?src=\"\\s*(https?://.+?)\\s*\".*?>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(https?://.+?\\.(jpg|png|bmp))\\\\?\".*?>(.*?)</a>")
result = text
images = Images{}
tmpImages Images
)
//log.Printf("Before image replacing: %s", text)
log.Print("[INFO] Extract images from task text")
for _, re := range []*regexp.Regexp{reImg, reA} {
result, tmpImages = extractImages(result, re, caption, len(images)+1)
images = append(images, tmpImages...)
}
if DEBUG {
log.Printf("[DEBUG] Found %d images", len(images))
}
return result, images
}
// ReplaceHTMLTags finds all html tags and removes them. Some tags like bold, italic are replaed with
// makrkups for telegram
func ReplaceHTMLTags(text string) string |
// ReplaceCommonTags deprecated - should be removed!!!
func ReplaceCommonTags(text string) string {
log.Print("Replace html tags")
var (
reBr = regexp.MustCompile("<br\\s*/?>")
reHr = regexp.MustCompile("<hr.*?/?>")
reP = regexp.MustCompile("<p>([^ ]+?)</p>")
reBold = regexp.MustCompile("<b.*?/?>((?s:.*?))</b>")
reStrong = regexp.MustCompile("<strong.*?>(.*?)</strong>")
reItalic = regexp.MustCompile("<i>((?s:.+?))</i>")
reSpan = regexp.MustCompile("<span.*?>(.*?)</span>")
reCenter = regexp.MustCompile("<center>((?s:.*?))</center>")
reFont = regexp.MustCompile("<font.+?color\\s*=\\\\?[\"«]?#?(\\w+)\\\\?[\"»]?.*?>((?s:.*?))</font>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(.+?)\\\\?\".*?>(.+?)</a>")
res = text
)
res = strings.Replace(text, "_", "\\_", -1)
if mrBr := reBr.FindAllStringSubmatch(text, -1); len(mrBr) > 0 {
for _, item := range mrBr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrHr := reHr.FindAllStringSubmatch(res, -1); len(mrHr) > 0 {
for _, item := range mrHr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrP := reP.FindAllStringSubmatch(res, -1); len(mrP) > 0 {
for _, item := range mrP {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("\n%s", item[1]))
}
}
if mrFont := reFont.FindAllStringSubmatch(res, -1); len(mrFont) > 0 {
for _, item := range mrFont {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("%s", item[2]))
//ReplaceAllLiteral(res, []byte(fmt.Sprintf("#%s#%s#", item[1], item[2])))
}
}
if mrBold := reBold.FindAllStringSubmatch(res, -1); len(mrBold) > 0 {
for _, item := range mrBold {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrStrong := reStrong.FindAllStringSubmatch(res, -1); len(mrStrong) > 0 {
for _, item := range mrStrong {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrItalic := reItalic.FindAllStringSubmatch(res, -1); len(mrItalic) > 0 {
for _, item := range mrItalic {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("_%s_", item[1]))
}
}
if mrSpan := reSpan.FindAllStringSubmatch(res, -1); len(mrSpan) > 0 {
for _, item := range mrSpan {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrCenter := reCenter.FindAllStringSubmatch(res, -1); len(mrCenter) > 0 {
for _, item := range mrCenter {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrA := reA.FindAllStringSubmatch(res, -1); len(mrA) > 0 {
for _, item := range mrA {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("[%s](%s)", item[2], item[1]))
}
}
res = strings.Replace(res, " ", " ", -1)
res = regexp.MustCompile("</?p>").ReplaceAllLiteralString(res, "")
return string(res)
}
func BlockTypeToString(typeId int8) string {
if typeId == 0 || typeId == 1 {
return "Игрок"
}
return "Команда"
}
func PrettyTimePrint(d time.Duration, nominative bool) (res *bytes.Buffer) {
var s string
res = bytes.NewBufferString(s)
//defer res.Close()
if (d / 3600) > 0 {
//res.WriteString(fmt.Sprintf("%d часов ", d/3600))
switch d / 3600 {
case 1, 21, 31, 41, 51:
res.WriteString(fmt.Sprintf("%d час ", d/3600))
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d часа ", d/3600))
default:
res.WriteString(fmt.Sprintf("%d часов ", d/3600))
}
}
if (d/60)%60 > 0 {
switch (d / 60) % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d минута ", (d/60)%60))
} else {
res.WriteString(fmt.Sprintf("%d минуту ", (d/60)%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d минуты ", (d/60)%60))
default:
res.WriteString(fmt.Sprintf("%d минут ", (d/60)%60))
}
}
if d%60 > 0 {
switch d % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d секунда", d%60))
} else {
res.WriteString(fmt.Sprintf("%d секунду", d%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d секунды", d%60))
default:
res.WriteString(fmt.Sprintf("%d секунд", d%60))
}
}
return
}
| {
var (
parser = html.NewTokenizer(strings.NewReader(text))
tagStack = stack.New()
textToTag = map[int]string{}
)
for {
node := parser.Next()
switch node {
case html.ErrorToken:
result := strings.Replace(textToTag[0], " ", " ", -1)
return result
case html.TextToken:
t := string(parser.Text())
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], t}, "")
case html.StartTagToken:
tagName, hasAttr := parser.TagName()
if string(tagName) == scriptTag {
// We can skip script tags, as they are invisible for the user, but we can indicate that there are
// scripts in the task. To skip tag, it is necessary to call Next() two times:
// 1) returns TextToken with the script body
// 2) returns EndTagToken for the closed script tag
// Usually script tag doesn't have any neste tags, so this aproach should work
log.Printf("[INFO] Skipping script tag")
parser.Next()
parser.Next()
continue
}
tag := Tag{Tag: string(tagName), Attrs: map[string]string{}}
if hasAttr {
for {
attr, val, moreAttr := parser.TagAttr()
if DEBUG {
log.Printf("[DEBUG] Found attr %s", attr)
}
tag.Attrs[string(attr)] = string(val)
if !moreAttr {
break
}
}
}
if DEBUG {
log.Printf("[DEBUG] Found tag %q", tag)
}
tagStack.Push(tag)
case html.EndTagToken:
var (
addText string
tagNo = tagStack.Len()
tag = tagStack.Pop()
closedTag, _ = parser.TagName()
)
if tag.(Tag).Tag != string(closedTag) {
log.Printf("[WARNING] Found closed tag %q but expected %q", closedTag, tag)
continue
}
if DEBUG {
log.Printf("[DEBUG] Found end of tag %q", closedTag)
}
switch tag.(Tag).Tag {
case iTag:
addText = fmt.Sprintf("_%s_", textToTag[tagNo])
case bTag, strongTag:
addText = fmt.Sprintf("*%s*", textToTag[tagNo])
case aTag:
// if strings.Compare(string(attr), "href") == 0 {
addText = fmt.Sprintf("[%s](%s)", textToTag[tagNo], tag.(Tag).Attrs["href"])
// }
default:
addText = textToTag[tagNo]
}
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], addText}, "")
delete(textToTag, tagNo)
}
}
} | identifier_body |
helpers.go | package en
import (
"bytes"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang-collections/collections/stack"
"golang.org/x/net/html"
)
// Tag string type that corresponds to the html tags
type Tag struct {
Tag string
Attrs map[string]string
}
const (
iTag string = "i"
bTag string = "b"
strongTag string = "strong"
scriptTag string = "script"
aTag string = "a"
)
type Coordinate struct {
Lat float64 `json:"lattitude"`
Lon float64 `json:"longtitude"`
OriginalString string `json:"name"`
}
type Coordinates []Coordinate
func (c Coordinate) String() (text string) {
text = fmt.Sprintf("%s (%f, %f)", c.OriginalString, c.Lat, c.Lon)
return
}
// Image stores data for the images in the text, e.g. URL to download the image,
// Filepath - path where file was downloaded
type Image struct {
URL string
Caption string
Filepath string
}
// Images - array of Image objects
type Images []Image
func extractCoordinates(text string, re *regexp.Regexp) (string, Coordinates) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
coords = Coordinates{}
)
if len(mr) > 0 {
for _, item := range mr {
lon, _ := strconv.ParseFloat(item[1], 64)
lat, _ := strconv.ParseFloat(item[2], 64)
if len(item) > 3 {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[3]})
} else {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[0]})
}
result = regexp.MustCompile(item[0]).ReplaceAllLiteralString(result, "#coords#")
}
}
return result, coords
}
// ExtractCoordinates extracts coordinates from the given text and returns the updated string
// with replaced coordinates and the list of coordinates
func ExtractCoordinates(text string) (string, Coordinates) {
var (
// <a href="geo:49.976136, 36.267256">49.976136, 36.267256</a>
geoHrefRe = regexp.MustCompile("<a.+?href=\"geo:(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})\">(.+?)</a>")
// <a href="https://www.google.com.ua/maps/@50.0363257,36.2120039,19z" target="blank">50.036435 36.211914</a>
hrefRe = regexp.MustCompile("<a.+?href=\"https?://.+?(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,}).*?\">(.+?)</a>")
// 49.976136, 36.267256
numbersRe = regexp.MustCompile("(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})")
res = text
coords = Coordinates{}
tmpCoords Coordinates
)
log.Print("[INFO] Extract coordinates from task text")
for _, re := range []*regexp.Regexp{geoHrefRe, hrefRe, numbersRe} {
res, tmpCoords = extractCoordinates(res, re)
coords = append(coords, tmpCoords...)
}
for _, coord := range coords {
res = strings.Replace(res, "#coords#", coord.OriginalString, 1)
}
if DEBUG {
log.Printf("[DEBUG] Found %d coordinates", len(coords))
}
return res, coords
}
func extractImages(text string, re *regexp.Regexp, caption string, start int) (string, Images) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
images = Images{}
)
if len(mr) > 0 {
for i, item := range mr {
images = append(images, Image{URL: item[1], Caption: fmt.Sprintf("%s #%d", caption, start+i)})
result = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(result, fmt.Sprintf("%s #%d", caption, start+i))
}
}
return result, images
}
// ExtractImages extracts images from the given text and returns the updated
// version of the text and the list of images
func ExtractImages(text string, caption string) (string, Images) {
var (
reImg = regexp.MustCompile("<img.+?src=\"\\s*(https?://.+?)\\s*\".*?>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(https?://.+?\\.(jpg|png|bmp))\\\\?\".*?>(.*?)</a>")
result = text
images = Images{}
tmpImages Images
)
//log.Printf("Before image replacing: %s", text)
log.Print("[INFO] Extract images from task text")
for _, re := range []*regexp.Regexp{reImg, reA} {
result, tmpImages = extractImages(result, re, caption, len(images)+1)
images = append(images, tmpImages...)
}
if DEBUG {
log.Printf("[DEBUG] Found %d images", len(images))
}
return result, images
}
// ReplaceHTMLTags finds all html tags and removes them. Some tags like bold, italic are replaed with
// makrkups for telegram
func ReplaceHTMLTags(text string) string {
var (
parser = html.NewTokenizer(strings.NewReader(text))
tagStack = stack.New()
textToTag = map[int]string{}
)
for {
node := parser.Next()
switch node {
case html.ErrorToken:
result := strings.Replace(textToTag[0], " ", " ", -1)
return result
case html.TextToken:
t := string(parser.Text())
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], t}, "")
case html.StartTagToken:
tagName, hasAttr := parser.TagName()
if string(tagName) == scriptTag {
// We can skip script tags, as they are invisible for the user, but we can indicate that there are
// scripts in the task. To skip tag, it is necessary to call Next() two times:
// 1) returns TextToken with the script body
// 2) returns EndTagToken for the closed script tag
// Usually script tag doesn't have any neste tags, so this aproach should work
log.Printf("[INFO] Skipping script tag")
parser.Next()
parser.Next()
continue
}
tag := Tag{Tag: string(tagName), Attrs: map[string]string{}}
if hasAttr {
for {
attr, val, moreAttr := parser.TagAttr()
if DEBUG {
log.Printf("[DEBUG] Found attr %s", attr)
}
tag.Attrs[string(attr)] = string(val)
if !moreAttr {
break
}
}
}
if DEBUG {
log.Printf("[DEBUG] Found tag %q", tag)
}
tagStack.Push(tag)
case html.EndTagToken:
var (
addText string
tagNo = tagStack.Len()
tag = tagStack.Pop()
closedTag, _ = parser.TagName()
)
if tag.(Tag).Tag != string(closedTag) {
log.Printf("[WARNING] Found closed tag %q but expected %q", closedTag, tag)
continue
}
if DEBUG {
log.Printf("[DEBUG] Found end of tag %q", closedTag)
}
switch tag.(Tag).Tag {
case iTag:
addText = fmt.Sprintf("_%s_", textToTag[tagNo])
case bTag, strongTag:
addText = fmt.Sprintf("*%s*", textToTag[tagNo])
case aTag:
// if strings.Compare(string(attr), "href") == 0 {
addText = fmt.Sprintf("[%s](%s)", textToTag[tagNo], tag.(Tag).Attrs["href"])
// }
default:
addText = textToTag[tagNo]
}
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], addText}, "")
delete(textToTag, tagNo)
}
}
}
// ReplaceCommonTags deprecated - should be removed!!!
func ReplaceCommonTags(text string) string {
log.Print("Replace html tags")
var (
reBr = regexp.MustCompile("<br\\s*/?>")
reHr = regexp.MustCompile("<hr.*?/?>")
reP = regexp.MustCompile("<p>([^ ]+?)</p>")
reBold = regexp.MustCompile("<b.*?/?>((?s:.*?))</b>") | reCenter = regexp.MustCompile("<center>((?s:.*?))</center>")
reFont = regexp.MustCompile("<font.+?color\\s*=\\\\?[\"«]?#?(\\w+)\\\\?[\"»]?.*?>((?s:.*?))</font>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(.+?)\\\\?\".*?>(.+?)</a>")
res = text
)
res = strings.Replace(text, "_", "\\_", -1)
if mrBr := reBr.FindAllStringSubmatch(text, -1); len(mrBr) > 0 {
for _, item := range mrBr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrHr := reHr.FindAllStringSubmatch(res, -1); len(mrHr) > 0 {
for _, item := range mrHr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrP := reP.FindAllStringSubmatch(res, -1); len(mrP) > 0 {
for _, item := range mrP {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("\n%s", item[1]))
}
}
if mrFont := reFont.FindAllStringSubmatch(res, -1); len(mrFont) > 0 {
for _, item := range mrFont {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("%s", item[2]))
//ReplaceAllLiteral(res, []byte(fmt.Sprintf("#%s#%s#", item[1], item[2])))
}
}
if mrBold := reBold.FindAllStringSubmatch(res, -1); len(mrBold) > 0 {
for _, item := range mrBold {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrStrong := reStrong.FindAllStringSubmatch(res, -1); len(mrStrong) > 0 {
for _, item := range mrStrong {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrItalic := reItalic.FindAllStringSubmatch(res, -1); len(mrItalic) > 0 {
for _, item := range mrItalic {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("_%s_", item[1]))
}
}
if mrSpan := reSpan.FindAllStringSubmatch(res, -1); len(mrSpan) > 0 {
for _, item := range mrSpan {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrCenter := reCenter.FindAllStringSubmatch(res, -1); len(mrCenter) > 0 {
for _, item := range mrCenter {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrA := reA.FindAllStringSubmatch(res, -1); len(mrA) > 0 {
for _, item := range mrA {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("[%s](%s)", item[2], item[1]))
}
}
res = strings.Replace(res, " ", " ", -1)
res = regexp.MustCompile("</?p>").ReplaceAllLiteralString(res, "")
return string(res)
}
func BlockTypeToString(typeId int8) string {
if typeId == 0 || typeId == 1 {
return "Игрок"
}
return "Команда"
}
func PrettyTimePrint(d time.Duration, nominative bool) (res *bytes.Buffer) {
var s string
res = bytes.NewBufferString(s)
//defer res.Close()
if (d / 3600) > 0 {
//res.WriteString(fmt.Sprintf("%d часов ", d/3600))
switch d / 3600 {
case 1, 21, 31, 41, 51:
res.WriteString(fmt.Sprintf("%d час ", d/3600))
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d часа ", d/3600))
default:
res.WriteString(fmt.Sprintf("%d часов ", d/3600))
}
}
if (d/60)%60 > 0 {
switch (d / 60) % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d минута ", (d/60)%60))
} else {
res.WriteString(fmt.Sprintf("%d минуту ", (d/60)%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d минуты ", (d/60)%60))
default:
res.WriteString(fmt.Sprintf("%d минут ", (d/60)%60))
}
}
if d%60 > 0 {
switch d % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d секунда", d%60))
} else {
res.WriteString(fmt.Sprintf("%d секунду", d%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d секунды", d%60))
default:
res.WriteString(fmt.Sprintf("%d секунд", d%60))
}
}
return
} | reStrong = regexp.MustCompile("<strong.*?>(.*?)</strong>")
reItalic = regexp.MustCompile("<i>((?s:.+?))</i>")
reSpan = regexp.MustCompile("<span.*?>(.*?)</span>") | random_line_split |
helpers.go | package en
import (
"bytes"
"fmt"
"log"
"regexp"
"strconv"
"strings"
"time"
"github.com/golang-collections/collections/stack"
"golang.org/x/net/html"
)
// Tag string type that corresponds to the html tags
type Tag struct {
Tag string
Attrs map[string]string
}
const (
iTag string = "i"
bTag string = "b"
strongTag string = "strong"
scriptTag string = "script"
aTag string = "a"
)
type Coordinate struct {
Lat float64 `json:"lattitude"`
Lon float64 `json:"longtitude"`
OriginalString string `json:"name"`
}
type Coordinates []Coordinate
func (c Coordinate) String() (text string) {
text = fmt.Sprintf("%s (%f, %f)", c.OriginalString, c.Lat, c.Lon)
return
}
// Image stores data for the images in the text, e.g. URL to download the image,
// Filepath - path where file was downloaded
type Image struct {
URL string
Caption string
Filepath string
}
// Images - array of Image objects
type Images []Image
func | (text string, re *regexp.Regexp) (string, Coordinates) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
coords = Coordinates{}
)
if len(mr) > 0 {
for _, item := range mr {
lon, _ := strconv.ParseFloat(item[1], 64)
lat, _ := strconv.ParseFloat(item[2], 64)
if len(item) > 3 {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[3]})
} else {
coords = append(coords, Coordinate{Lat: lon, Lon: lat, OriginalString: item[0]})
}
result = regexp.MustCompile(item[0]).ReplaceAllLiteralString(result, "#coords#")
}
}
return result, coords
}
// ExtractCoordinates extracts coordinates from the given text and returns the updated string
// with replaced coordinates and the list of coordinates
func ExtractCoordinates(text string) (string, Coordinates) {
var (
// <a href="geo:49.976136, 36.267256">49.976136, 36.267256</a>
geoHrefRe = regexp.MustCompile("<a.+?href=\"geo:(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})\">(.+?)</a>")
// <a href="https://www.google.com.ua/maps/@50.0363257,36.2120039,19z" target="blank">50.036435 36.211914</a>
hrefRe = regexp.MustCompile("<a.+?href=\"https?://.+?(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,}).*?\">(.+?)</a>")
// 49.976136, 36.267256
numbersRe = regexp.MustCompile("(\\d{2}[.,]\\d{3,}),?\\s*(\\d{2}[.,]\\d{3,})")
res = text
coords = Coordinates{}
tmpCoords Coordinates
)
log.Print("[INFO] Extract coordinates from task text")
for _, re := range []*regexp.Regexp{geoHrefRe, hrefRe, numbersRe} {
res, tmpCoords = extractCoordinates(res, re)
coords = append(coords, tmpCoords...)
}
for _, coord := range coords {
res = strings.Replace(res, "#coords#", coord.OriginalString, 1)
}
if DEBUG {
log.Printf("[DEBUG] Found %d coordinates", len(coords))
}
return res, coords
}
func extractImages(text string, re *regexp.Regexp, caption string, start int) (string, Images) {
var (
result = text
mr = re.FindAllStringSubmatch(text, -1)
images = Images{}
)
if len(mr) > 0 {
for i, item := range mr {
images = append(images, Image{URL: item[1], Caption: fmt.Sprintf("%s #%d", caption, start+i)})
result = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(result, fmt.Sprintf("%s #%d", caption, start+i))
}
}
return result, images
}
// ExtractImages extracts images from the given text and returns the updated
// version of the text and the list of images
func ExtractImages(text string, caption string) (string, Images) {
var (
reImg = regexp.MustCompile("<img.+?src=\"\\s*(https?://.+?)\\s*\".*?>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(https?://.+?\\.(jpg|png|bmp))\\\\?\".*?>(.*?)</a>")
result = text
images = Images{}
tmpImages Images
)
//log.Printf("Before image replacing: %s", text)
log.Print("[INFO] Extract images from task text")
for _, re := range []*regexp.Regexp{reImg, reA} {
result, tmpImages = extractImages(result, re, caption, len(images)+1)
images = append(images, tmpImages...)
}
if DEBUG {
log.Printf("[DEBUG] Found %d images", len(images))
}
return result, images
}
// ReplaceHTMLTags finds all html tags and removes them. Some tags like bold, italic are replaed with
// makrkups for telegram
func ReplaceHTMLTags(text string) string {
var (
parser = html.NewTokenizer(strings.NewReader(text))
tagStack = stack.New()
textToTag = map[int]string{}
)
for {
node := parser.Next()
switch node {
case html.ErrorToken:
result := strings.Replace(textToTag[0], " ", " ", -1)
return result
case html.TextToken:
t := string(parser.Text())
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], t}, "")
case html.StartTagToken:
tagName, hasAttr := parser.TagName()
if string(tagName) == scriptTag {
// We can skip script tags, as they are invisible for the user, but we can indicate that there are
// scripts in the task. To skip tag, it is necessary to call Next() two times:
// 1) returns TextToken with the script body
// 2) returns EndTagToken for the closed script tag
// Usually script tag doesn't have any neste tags, so this aproach should work
log.Printf("[INFO] Skipping script tag")
parser.Next()
parser.Next()
continue
}
tag := Tag{Tag: string(tagName), Attrs: map[string]string{}}
if hasAttr {
for {
attr, val, moreAttr := parser.TagAttr()
if DEBUG {
log.Printf("[DEBUG] Found attr %s", attr)
}
tag.Attrs[string(attr)] = string(val)
if !moreAttr {
break
}
}
}
if DEBUG {
log.Printf("[DEBUG] Found tag %q", tag)
}
tagStack.Push(tag)
case html.EndTagToken:
var (
addText string
tagNo = tagStack.Len()
tag = tagStack.Pop()
closedTag, _ = parser.TagName()
)
if tag.(Tag).Tag != string(closedTag) {
log.Printf("[WARNING] Found closed tag %q but expected %q", closedTag, tag)
continue
}
if DEBUG {
log.Printf("[DEBUG] Found end of tag %q", closedTag)
}
switch tag.(Tag).Tag {
case iTag:
addText = fmt.Sprintf("_%s_", textToTag[tagNo])
case bTag, strongTag:
addText = fmt.Sprintf("*%s*", textToTag[tagNo])
case aTag:
// if strings.Compare(string(attr), "href") == 0 {
addText = fmt.Sprintf("[%s](%s)", textToTag[tagNo], tag.(Tag).Attrs["href"])
// }
default:
addText = textToTag[tagNo]
}
textToTag[tagStack.Len()] = strings.Join([]string{textToTag[tagStack.Len()], addText}, "")
delete(textToTag, tagNo)
}
}
}
// ReplaceCommonTags deprecated - should be removed!!!
func ReplaceCommonTags(text string) string {
log.Print("Replace html tags")
var (
reBr = regexp.MustCompile("<br\\s*/?>")
reHr = regexp.MustCompile("<hr.*?/?>")
reP = regexp.MustCompile("<p>([^ ]+?)</p>")
reBold = regexp.MustCompile("<b.*?/?>((?s:.*?))</b>")
reStrong = regexp.MustCompile("<strong.*?>(.*?)</strong>")
reItalic = regexp.MustCompile("<i>((?s:.+?))</i>")
reSpan = regexp.MustCompile("<span.*?>(.*?)</span>")
reCenter = regexp.MustCompile("<center>((?s:.*?))</center>")
reFont = regexp.MustCompile("<font.+?color\\s*=\\\\?[\"«]?#?(\\w+)\\\\?[\"»]?.*?>((?s:.*?))</font>")
reA = regexp.MustCompile("<a.+?href=\\\\?\"(.+?)\\\\?\".*?>(.+?)</a>")
res = text
)
res = strings.Replace(text, "_", "\\_", -1)
if mrBr := reBr.FindAllStringSubmatch(text, -1); len(mrBr) > 0 {
for _, item := range mrBr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrHr := reHr.FindAllStringSubmatch(res, -1); len(mrHr) > 0 {
for _, item := range mrHr {
res = regexp.MustCompile(item[0]).ReplaceAllLiteralString(res, "\n")
}
}
if mrP := reP.FindAllStringSubmatch(res, -1); len(mrP) > 0 {
for _, item := range mrP {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("\n%s", item[1]))
}
}
if mrFont := reFont.FindAllStringSubmatch(res, -1); len(mrFont) > 0 {
for _, item := range mrFont {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("%s", item[2]))
//ReplaceAllLiteral(res, []byte(fmt.Sprintf("#%s#%s#", item[1], item[2])))
}
}
if mrBold := reBold.FindAllStringSubmatch(res, -1); len(mrBold) > 0 {
for _, item := range mrBold {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrStrong := reStrong.FindAllStringSubmatch(res, -1); len(mrStrong) > 0 {
for _, item := range mrStrong {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("*%s*", item[1]))
}
}
if mrItalic := reItalic.FindAllStringSubmatch(res, -1); len(mrItalic) > 0 {
for _, item := range mrItalic {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("_%s_", item[1]))
}
}
if mrSpan := reSpan.FindAllStringSubmatch(res, -1); len(mrSpan) > 0 {
for _, item := range mrSpan {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrCenter := reCenter.FindAllStringSubmatch(res, -1); len(mrCenter) > 0 {
for _, item := range mrCenter {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, item[1])
}
}
if mrA := reA.FindAllStringSubmatch(res, -1); len(mrA) > 0 {
for _, item := range mrA {
res = regexp.MustCompile(regexp.QuoteMeta(item[0])).
ReplaceAllLiteralString(res, fmt.Sprintf("[%s](%s)", item[2], item[1]))
}
}
res = strings.Replace(res, " ", " ", -1)
res = regexp.MustCompile("</?p>").ReplaceAllLiteralString(res, "")
return string(res)
}
func BlockTypeToString(typeId int8) string {
if typeId == 0 || typeId == 1 {
return "Игрок"
}
return "Команда"
}
func PrettyTimePrint(d time.Duration, nominative bool) (res *bytes.Buffer) {
var s string
res = bytes.NewBufferString(s)
//defer res.Close()
if (d / 3600) > 0 {
//res.WriteString(fmt.Sprintf("%d часов ", d/3600))
switch d / 3600 {
case 1, 21, 31, 41, 51:
res.WriteString(fmt.Sprintf("%d час ", d/3600))
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d часа ", d/3600))
default:
res.WriteString(fmt.Sprintf("%d часов ", d/3600))
}
}
if (d/60)%60 > 0 {
switch (d / 60) % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d минута ", (d/60)%60))
} else {
res.WriteString(fmt.Sprintf("%d минуту ", (d/60)%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d минуты ", (d/60)%60))
default:
res.WriteString(fmt.Sprintf("%d минут ", (d/60)%60))
}
}
if d%60 > 0 {
switch d % 60 {
case 1, 21, 31, 41, 51:
if nominative {
res.WriteString(fmt.Sprintf("%d секунда", d%60))
} else {
res.WriteString(fmt.Sprintf("%d секунду", d%60))
}
case 2, 3, 4, 22, 23, 24, 32, 33, 34, 42, 43, 44, 52, 53, 54:
res.WriteString(fmt.Sprintf("%d секунды", d%60))
default:
res.WriteString(fmt.Sprintf("%d секунд", d%60))
}
}
return
}
| extractCoordinates | identifier_name |
trainPredictor.py | #!/usr/bin/env python3
from builtins import zip
from builtins import str
from builtins import range
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process,exitv,message):
|
pathjoin = os.path.join
pathexists = os.path.exists
mdy = dtime.datetime.now().strftime('%m%d%y')
product_type = 'interferogram'
cache_dir = 'cached'
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1
cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels
scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall'] = lambda te,pr,ul: recall_score(te,pr,labels=ul)
errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred
# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'
# SKLEARN MODEL SPECIFICATIONS #################################################
### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
'verbose':train_verbose,'random_state':train_state,
'criterion':'gini','class_weight':'balanced_subsample'
}
### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
xgb_default = {
'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
'objective':'binary:logistic','max_depth':3,'subsample':0.5,
'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
with open(jsonfile,'r') as fid:
return json.load(fid)
def dumpjson(objdict,jsonfile):
with open(jsonfile,'w') as fid:
return json.dump(fid,objdict)
def url2pid(url):
"""
url2pid(url): convert url to product id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- product id for url
"""
if url.endswith('/'):
url = url[:-1]
urlsplit = url.split('/')
return (urlsplit[-2] + '_' + urlsplit[-1]).replace('__','_')
def url2featid(url,product_type):
"""
url2pid(url): convert url to feature id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- feature id for url
"""
return url.replace(product_type,'features').replace('features__','features_'+product_type+'__')
def fdict2vec(featdict,clfinputs):
'''
extract feature vector from dict given classifier parameters
specifying which features to use
'''
fvec = []
try:
featspec = clfinputs['features']
featorder = featspec['feature_order']
featdims = featspec['feature_dims']
cohthr = featspec['cohthr10']
featscoh = featdict['%d'%cohthr]
for fid,fdim in zip(featorder,featdims):
flist = featscoh[fid]
if not isinstance(flist,list):
flist = [flist]
assert(len(flist) == fdim)
fvec.extend(flist)
except Exception:
pass
return fvec
def curlProductMeta(prod_url,verbose=False,remove=True):
"""
curlProductMeta(prod_url,verbose=False)
Arguments:
- prod_url: product url
Keyword Arguments:
- verbose: verbose output (default=False)
Returns: metadata dict from product .met.json
"""
if prod_url.endswith('/'):
prod_url = prod_url[:-1]
prod_json = url2pid(prod_url) + '.met.json'
try:
uu = UrlUtils()
silentoutput = ' ' if verbose else ' --silent '
userstr = uu.dav_u + ':' + uu.dav_p
command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
os.system(command)
except Exception:
return {}
if not pathexists(prod_json):
return {}
meta = loadjson(prod_json)
if remove:
os.remove(prod_json)
return meta
def getFeatures(url,clfinputs,product_type='interferogram'):
'''
retrieves feature vector for the given product url, provided clfinputs
'''
featurl = url2featid(url,product_type)
featdict = curlProductMeta(featurl)
fvec = fdict2vec(featdict,clfinputs)
return fvec
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
'''
builds/posts the faceted search query specified in querymeta and dumps the
result to queryoutfile. if queryoutfile already exists, the query is loaded from
disk rather than executed.
'''
if not cache or not pathexists(queryoutfile):
print('executing faceted search query...')
from utils.queryBuilder import postQuery, buildQuery
from utils.contextUtils import toContext
ret,status = postQuery(buildQuery(querymeta,queryoptions))
if cache and status:
# only dump the query if caching enabled and postQuery succeeds
with open(queryoutfile,'wb') as fid:
pickle.dump(ret,fid)
elif cache:
print('loading cached query from %s...'%queryoutfile)
with open(queryoutfile,'rb') as fid:
ret = pickle.load(fid)
print('query returned %d products'%len(ret))
return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
substitutes '_', for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = list(initialmap.keys())
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
json.dump(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
clfspec['clf_file'] = inputs['clf_file']
clfspec['classmap'] = inputs["classmap_file"]
clfspec['features'] = inputs("feat_file")
def usertags2label(usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in list(classmap.items()):
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery)
Keep only IGMs with tags present in classmap, and select/validate features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
if all([ret['tags'][i] == tags[i] for i in range(len(tags))]):
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
Keyword Arguments:
None
Returns:
- clf: tuned classifier
- cv: cross validation struct used to tune classifier
"""
uy = np.unique(y_train)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return None,None
model_id = clfinputs['clf_type']
if model_id == 'rf':
model_clf = RandomForestClassifier(**rf_defaults)
model_tuned = [rf_tuned]
else:
print("invalid clf_type")
return {}
clf = clone(model_clf)
if model_tuned is not None and len(model_tuned) != 0 and \
len(model_tuned[0]) != 0:
cv = GridSearchCV(clf,model_tuned,cv=gridcv_folds,scoring=gridcv_score,
n_jobs=gridcv_jobs,verbose=gridcv_verbose,refit=True)
cv.fit(X_train, y_train)
clf = cv.best_estimator_
else: # no parameter tuning
clf.fit(X_train,y_train)
return clf,cv
def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):
"""
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log')
use cross validation to assess the quality of a specified classifier
Arguments:
- X: training data
- y: training labels
- clfinputs: dict of classifier inputs
Keyword Arguments:
- logfile: cross-validation outfile (default='cvout.log')
Returns:
- dict containing:
- models: model for each cross validation fold
- scores: scores for each fold according to each scorefn
- preds: predictions for each training sample
- errors: errors for each training sample according to each errorfn
- modelcvs: cross validation structure used to train each model
"""
models,modelcvs,preds,probs = [],[],[],[]
scores = dict([(key,[]) for key in list(scorefn.keys())])
errors = dict([(key,[]) for key in list(errorfn.keys())])
# validate class labels
uy = np.unique(y)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return {}
N,ymin = len(y),uy[0]
if cv_type == 'loo':
cv = KFold(N,n_folds=N,random_state=train_state)
y_pred = np.zeros(N)
y_prob = np.zeros(N)
else:
cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)
n_folds = len(cv)
model_id = clfinputs['clf_type']
widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()
with open(logfile,'w') as logfid:
cv_test_index = []
scorekeys = sorted(scores.keys())
for i,(train_index,test_index) in enumerate(cv):
pbar.update(i)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cv_test_index.extend(test_index)
# xgb assumes labels \in {0,1}
if model_id == 'xgb' and ymin == -1:
y_train[y_train==-1] = 0
# train/predict as usual
clf,clf_cv = train(X_train,y_train,clfinputs)
clf_pred = clf.predict(X_test)
if model_id == 'xgb' and ymin == -1:
clf_pred[clf_pred==0] = -1
if cv_probs:
clf_prob = clf.predict_proba(X_test)[:,0]
else:
clf_prob = np.ones(len(clf_pred))*np.nan
# loo predicts one label per 'fold'
if cv_type == 'loo':
y_pred[test_index] = clf_pred
y_prob[test_index] = clf_prob
# compute scores for the points we've classified thus far
y_test_cur = np.atleast_1d(y[cv_test_index])
y_pred_cur = np.atleast_1d(y_pred[cv_test_index])
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test_cur,y_pred_cur,uy)
scores[score] = [scorei]
else:
# collect output for all test samples in this fold
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test,clf_pred,uy)
scores[score].append(scorei)
preds.append(clf_pred)
probs.append(clf_prob)
models.append(clf)
modelcvs.append(clf_cv)
for error,error_fn in list(errorfn.items()):
errors[error].append(error_fn(y_test,clf_pred))
if i==0:
scorenames = ['%-16s'%score for score in scorekeys]
logstr = '%-8s %s'%('i',''.join(scorenames))
else:
curscores = ['%-16.4f'%(np.mean(scores[score]))
for score in scorekeys]
logstr = '%-8.3g %s'%(i,''.join(curscores))
print(logstr,file=logfid,flush=True)
# train full model for loo cv, score on loo preds from above
if cv_type == 'loo':
for score,score_fn in list(scorefn.items()):
scores[score] = [score_fn(y,y_pred,uy)]
for error,error_fn in list(errorfn.items()):
errors[error] = [error_fn(y,y_pred)]
clf,clf_cv = train(X,y,clfinputs)
models = [clf]
modelcvs = [clf_cv]
preds = [y_pred]
probs = [y_prob]
pbar.update(i+1)
pbar.finish()
# output scores ordered by key
for score_id in scorekeys:
score_vals = scores[score_id]
print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),
np.std(score_vals)))
return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,
'models':models,'modelcvs':modelcvs}
def trainPredictor(infile):
process = 'trainPredictor'
# fix the random seed to ensure reproducibility
np.random.seed(seed=train_state)
inputs = loadjson(infile)
outputs = {}
outbase = 'predictor%s'%mdy
cwd = os.getcwd()
try:
clfinputs = {}
clfinputs['clf_file'] = inputs['clf_name']+'.pkl'
clfinputs['clf_type'] = inputs['clf_type']
clfinputs['classmap'] = loadClassmap(inputs["classmap_file"])
clfinputs['features'] = loadjson(inputs["feat_file"])
inputurls = inputs.pop('urls',[])
crossvalidate = inputs.pop('crossvalidate',0)
saveclf = inputs.pop('saveclf',0)
cacheoutput = inputs.pop('cacheoutput',0)
if not pathexists(outbase):
os.mkdir(outbase)
if cacheoutput and not pathexists(pathjoin(outbase,cache_dir)):
os.mkdir(pathjoin(outbase,cache_dir))
os.chdir(outbase)
except Exception as e:
exitv = 10
message = 'IO Preprocessing failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
trdat = collectTrainingData(inputurls,clfinputs,cache=cacheoutput)
X, y = trdat['X'],trdat['y']
traintags, trainurls = trdat['traintags'],trdat['trainurls']
errors, skiplist = trdat['skiplist'],trdat['errors']
print('loaded %d training samples (%d skipped)'%(len(y),len(skiplist)))
except Exception as e:
exitv = 11
message = 'Training data collection failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if crossvalidate:
cvoutpkl = "cvout.pkl"
cvlogfile = 'cvout.log'
print('evaluating model via %s cross-validation (logfile=%s)...'%(cv_type,cvlogfile))
starttime = time.time()
cvout = crossValidatePredictor(X,y,clfinputs,logfile=cvlogfile)
outputs['cv_time'] = time.time()-starttime
outputs['cv_out'] = cvoutpkl
outputs['cv_log'] = cvlogfile
with open(cvoutpkl,'wb') as fid:
pickle.dump(cvout,fid)
print('done, output saved to %s.'%cvoutpkl)
except Exception as e:
exitv = 12
message = 'Cross-validation failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if saveclf:
starttime = time.time()
clf,clfcv = train(X,y,clfinputs)
clffile = clfinputs['clf_file']
if clffile[0] != '/':
clffile = pathjoin(cwd,clffile) # path relative to cwd
clfjson = clffile.replace('.pkl','.json')
outputs['clf_time'] = time.time()-starttime
outputs['clf_file'] = clffile
print("training classifier using all available data for deployment...")
with open(clffile,'wb') as fid:
pickle.dump(clf,fid)
with open(clfjson,'w') as fid:
json.dump(clfinputs,fid)
print('done, output saved to %s.'%clffile)
except Exception as e:
exitv = 13
message = 'Classifier training failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
json.dump(outputs,open(outbase+'.met.json','w'),indent=True)
except Exception:
os.chdir(cwd)
exitv = 14
message = 'Failed to create metadata file for ' + outbase
toContext(process,exitv,message)
sys.exit(1)
exitv = 0
os.chdir(cwd)
message = 'trainPredictor finished with no errors.'
toContext(process,exitv,message)
if __name__ == '__main__':
try: status = trainPredictor(sys.argv[1])
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status)
| print(process,exitv,message) | identifier_body |
trainPredictor.py | #!/usr/bin/env python3
from builtins import zip
from builtins import str
from builtins import range
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process,exitv,message):
print(process,exitv,message)
pathjoin = os.path.join
pathexists = os.path.exists
mdy = dtime.datetime.now().strftime('%m%d%y')
product_type = 'interferogram'
cache_dir = 'cached'
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1
cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels
scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall'] = lambda te,pr,ul: recall_score(te,pr,labels=ul)
errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred
# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'
# SKLEARN MODEL SPECIFICATIONS #################################################
### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
'verbose':train_verbose,'random_state':train_state,
'criterion':'gini','class_weight':'balanced_subsample'
}
### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
xgb_default = {
'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
'objective':'binary:logistic','max_depth':3,'subsample':0.5,
'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
with open(jsonfile,'r') as fid:
return json.load(fid)
def dumpjson(objdict,jsonfile):
with open(jsonfile,'w') as fid:
return json.dump(fid,objdict)
def url2pid(url):
"""
url2pid(url): convert url to product id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- product id for url
"""
if url.endswith('/'):
url = url[:-1]
urlsplit = url.split('/')
return (urlsplit[-2] + '_' + urlsplit[-1]).replace('__','_')
def url2featid(url,product_type):
"""
url2pid(url): convert url to feature id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- feature id for url
"""
return url.replace(product_type,'features').replace('features__','features_'+product_type+'__')
def fdict2vec(featdict,clfinputs):
'''
extract feature vector from dict given classifier parameters
specifying which features to use
'''
fvec = []
try:
featspec = clfinputs['features']
featorder = featspec['feature_order']
featdims = featspec['feature_dims']
cohthr = featspec['cohthr10']
featscoh = featdict['%d'%cohthr]
for fid,fdim in zip(featorder,featdims):
flist = featscoh[fid]
if not isinstance(flist,list):
flist = [flist]
assert(len(flist) == fdim)
fvec.extend(flist)
except Exception:
pass
return fvec
def curlProductMeta(prod_url,verbose=False,remove=True):
"""
curlProductMeta(prod_url,verbose=False)
Arguments:
- prod_url: product url
Keyword Arguments:
- verbose: verbose output (default=False)
Returns: metadata dict from product .met.json
"""
if prod_url.endswith('/'):
prod_url = prod_url[:-1]
prod_json = url2pid(prod_url) + '.met.json'
try:
uu = UrlUtils()
silentoutput = ' ' if verbose else ' --silent '
userstr = uu.dav_u + ':' + uu.dav_p
command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
os.system(command)
except Exception:
return {}
if not pathexists(prod_json):
return {}
meta = loadjson(prod_json)
if remove:
os.remove(prod_json)
return meta
def getFeatures(url,clfinputs,product_type='interferogram'):
'''
retrieves feature vector for the given product url, provided clfinputs
'''
featurl = url2featid(url,product_type)
featdict = curlProductMeta(featurl)
fvec = fdict2vec(featdict,clfinputs)
return fvec
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
'''
builds/posts the faceted search query specified in querymeta and dumps the
result to queryoutfile. if queryoutfile already exists, the query is loaded from
disk rather than executed.
'''
if not cache or not pathexists(queryoutfile):
print('executing faceted search query...')
from utils.queryBuilder import postQuery, buildQuery
from utils.contextUtils import toContext
ret,status = postQuery(buildQuery(querymeta,queryoptions))
if cache and status:
# only dump the query if caching enabled and postQuery succeeds
with open(queryoutfile,'wb') as fid:
pickle.dump(ret,fid)
elif cache:
print('loading cached query from %s...'%queryoutfile)
with open(queryoutfile,'rb') as fid:
ret = pickle.load(fid)
print('query returned %d products'%len(ret))
return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
substitutes '_', for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = list(initialmap.keys())
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
json.dump(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
clfspec['clf_file'] = inputs['clf_file']
clfspec['classmap'] = inputs["classmap_file"]
clfspec['features'] = inputs("feat_file")
def | (usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in list(classmap.items()):
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery)
Keep only IGMs with tags present in classmap, and select/validate features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
if all([ret['tags'][i] == tags[i] for i in range(len(tags))]):
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
Keyword Arguments:
None
Returns:
- clf: tuned classifier
- cv: cross validation struct used to tune classifier
"""
uy = np.unique(y_train)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return None,None
model_id = clfinputs['clf_type']
if model_id == 'rf':
model_clf = RandomForestClassifier(**rf_defaults)
model_tuned = [rf_tuned]
else:
print("invalid clf_type")
return {}
clf = clone(model_clf)
if model_tuned is not None and len(model_tuned) != 0 and \
len(model_tuned[0]) != 0:
cv = GridSearchCV(clf,model_tuned,cv=gridcv_folds,scoring=gridcv_score,
n_jobs=gridcv_jobs,verbose=gridcv_verbose,refit=True)
cv.fit(X_train, y_train)
clf = cv.best_estimator_
else: # no parameter tuning
clf.fit(X_train,y_train)
return clf,cv
def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):
"""
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log')
use cross validation to assess the quality of a specified classifier
Arguments:
- X: training data
- y: training labels
- clfinputs: dict of classifier inputs
Keyword Arguments:
- logfile: cross-validation outfile (default='cvout.log')
Returns:
- dict containing:
- models: model for each cross validation fold
- scores: scores for each fold according to each scorefn
- preds: predictions for each training sample
- errors: errors for each training sample according to each errorfn
- modelcvs: cross validation structure used to train each model
"""
models,modelcvs,preds,probs = [],[],[],[]
scores = dict([(key,[]) for key in list(scorefn.keys())])
errors = dict([(key,[]) for key in list(errorfn.keys())])
# validate class labels
uy = np.unique(y)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return {}
N,ymin = len(y),uy[0]
if cv_type == 'loo':
cv = KFold(N,n_folds=N,random_state=train_state)
y_pred = np.zeros(N)
y_prob = np.zeros(N)
else:
cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)
n_folds = len(cv)
model_id = clfinputs['clf_type']
widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()
with open(logfile,'w') as logfid:
cv_test_index = []
scorekeys = sorted(scores.keys())
for i,(train_index,test_index) in enumerate(cv):
pbar.update(i)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cv_test_index.extend(test_index)
# xgb assumes labels \in {0,1}
if model_id == 'xgb' and ymin == -1:
y_train[y_train==-1] = 0
# train/predict as usual
clf,clf_cv = train(X_train,y_train,clfinputs)
clf_pred = clf.predict(X_test)
if model_id == 'xgb' and ymin == -1:
clf_pred[clf_pred==0] = -1
if cv_probs:
clf_prob = clf.predict_proba(X_test)[:,0]
else:
clf_prob = np.ones(len(clf_pred))*np.nan
# loo predicts one label per 'fold'
if cv_type == 'loo':
y_pred[test_index] = clf_pred
y_prob[test_index] = clf_prob
# compute scores for the points we've classified thus far
y_test_cur = np.atleast_1d(y[cv_test_index])
y_pred_cur = np.atleast_1d(y_pred[cv_test_index])
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test_cur,y_pred_cur,uy)
scores[score] = [scorei]
else:
# collect output for all test samples in this fold
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test,clf_pred,uy)
scores[score].append(scorei)
preds.append(clf_pred)
probs.append(clf_prob)
models.append(clf)
modelcvs.append(clf_cv)
for error,error_fn in list(errorfn.items()):
errors[error].append(error_fn(y_test,clf_pred))
if i==0:
scorenames = ['%-16s'%score for score in scorekeys]
logstr = '%-8s %s'%('i',''.join(scorenames))
else:
curscores = ['%-16.4f'%(np.mean(scores[score]))
for score in scorekeys]
logstr = '%-8.3g %s'%(i,''.join(curscores))
print(logstr,file=logfid,flush=True)
# train full model for loo cv, score on loo preds from above
if cv_type == 'loo':
for score,score_fn in list(scorefn.items()):
scores[score] = [score_fn(y,y_pred,uy)]
for error,error_fn in list(errorfn.items()):
errors[error] = [error_fn(y,y_pred)]
clf,clf_cv = train(X,y,clfinputs)
models = [clf]
modelcvs = [clf_cv]
preds = [y_pred]
probs = [y_prob]
pbar.update(i+1)
pbar.finish()
# output scores ordered by key
for score_id in scorekeys:
score_vals = scores[score_id]
print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),
np.std(score_vals)))
return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,
'models':models,'modelcvs':modelcvs}
def trainPredictor(infile):
process = 'trainPredictor'
# fix the random seed to ensure reproducibility
np.random.seed(seed=train_state)
inputs = loadjson(infile)
outputs = {}
outbase = 'predictor%s'%mdy
cwd = os.getcwd()
try:
clfinputs = {}
clfinputs['clf_file'] = inputs['clf_name']+'.pkl'
clfinputs['clf_type'] = inputs['clf_type']
clfinputs['classmap'] = loadClassmap(inputs["classmap_file"])
clfinputs['features'] = loadjson(inputs["feat_file"])
inputurls = inputs.pop('urls',[])
crossvalidate = inputs.pop('crossvalidate',0)
saveclf = inputs.pop('saveclf',0)
cacheoutput = inputs.pop('cacheoutput',0)
if not pathexists(outbase):
os.mkdir(outbase)
if cacheoutput and not pathexists(pathjoin(outbase,cache_dir)):
os.mkdir(pathjoin(outbase,cache_dir))
os.chdir(outbase)
except Exception as e:
exitv = 10
message = 'IO Preprocessing failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
trdat = collectTrainingData(inputurls,clfinputs,cache=cacheoutput)
X, y = trdat['X'],trdat['y']
traintags, trainurls = trdat['traintags'],trdat['trainurls']
errors, skiplist = trdat['skiplist'],trdat['errors']
print('loaded %d training samples (%d skipped)'%(len(y),len(skiplist)))
except Exception as e:
exitv = 11
message = 'Training data collection failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if crossvalidate:
cvoutpkl = "cvout.pkl"
cvlogfile = 'cvout.log'
print('evaluating model via %s cross-validation (logfile=%s)...'%(cv_type,cvlogfile))
starttime = time.time()
cvout = crossValidatePredictor(X,y,clfinputs,logfile=cvlogfile)
outputs['cv_time'] = time.time()-starttime
outputs['cv_out'] = cvoutpkl
outputs['cv_log'] = cvlogfile
with open(cvoutpkl,'wb') as fid:
pickle.dump(cvout,fid)
print('done, output saved to %s.'%cvoutpkl)
except Exception as e:
exitv = 12
message = 'Cross-validation failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if saveclf:
starttime = time.time()
clf,clfcv = train(X,y,clfinputs)
clffile = clfinputs['clf_file']
if clffile[0] != '/':
clffile = pathjoin(cwd,clffile) # path relative to cwd
clfjson = clffile.replace('.pkl','.json')
outputs['clf_time'] = time.time()-starttime
outputs['clf_file'] = clffile
print("training classifier using all available data for deployment...")
with open(clffile,'wb') as fid:
pickle.dump(clf,fid)
with open(clfjson,'w') as fid:
json.dump(clfinputs,fid)
print('done, output saved to %s.'%clffile)
except Exception as e:
exitv = 13
message = 'Classifier training failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
json.dump(outputs,open(outbase+'.met.json','w'),indent=True)
except Exception:
os.chdir(cwd)
exitv = 14
message = 'Failed to create metadata file for ' + outbase
toContext(process,exitv,message)
sys.exit(1)
exitv = 0
os.chdir(cwd)
message = 'trainPredictor finished with no errors.'
toContext(process,exitv,message)
if __name__ == '__main__':
try: status = trainPredictor(sys.argv[1])
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status)
| usertags2label | identifier_name |
trainPredictor.py | #!/usr/bin/env python3
from builtins import zip
from builtins import str
from builtins import range
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process,exitv,message):
print(process,exitv,message)
pathjoin = os.path.join
pathexists = os.path.exists
mdy = dtime.datetime.now().strftime('%m%d%y')
product_type = 'interferogram'
cache_dir = 'cached'
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1
cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels
scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall'] = lambda te,pr,ul: recall_score(te,pr,labels=ul)
errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred
# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'
# SKLEARN MODEL SPECIFICATIONS #################################################
### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
'verbose':train_verbose,'random_state':train_state,
'criterion':'gini','class_weight':'balanced_subsample'
}
### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
xgb_default = {
'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
'objective':'binary:logistic','max_depth':3,'subsample':0.5,
'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
with open(jsonfile,'r') as fid:
return json.load(fid)
def dumpjson(objdict,jsonfile):
with open(jsonfile,'w') as fid:
return json.dump(fid,objdict)
def url2pid(url):
"""
url2pid(url): convert url to product id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- product id for url
"""
if url.endswith('/'):
url = url[:-1]
urlsplit = url.split('/')
return (urlsplit[-2] + '_' + urlsplit[-1]).replace('__','_')
def url2featid(url,product_type):
"""
url2pid(url): convert url to feature id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- feature id for url
"""
return url.replace(product_type,'features').replace('features__','features_'+product_type+'__')
def fdict2vec(featdict,clfinputs):
'''
extract feature vector from dict given classifier parameters
specifying which features to use
'''
fvec = []
try:
featspec = clfinputs['features']
featorder = featspec['feature_order']
featdims = featspec['feature_dims']
cohthr = featspec['cohthr10']
featscoh = featdict['%d'%cohthr]
for fid,fdim in zip(featorder,featdims):
flist = featscoh[fid]
if not isinstance(flist,list):
flist = [flist]
assert(len(flist) == fdim)
fvec.extend(flist)
except Exception:
pass
return fvec
def curlProductMeta(prod_url,verbose=False,remove=True):
"""
curlProductMeta(prod_url,verbose=False)
Arguments:
- prod_url: product url
Keyword Arguments:
- verbose: verbose output (default=False)
Returns: metadata dict from product .met.json
"""
if prod_url.endswith('/'):
prod_url = prod_url[:-1]
prod_json = url2pid(prod_url) + '.met.json'
try:
uu = UrlUtils()
silentoutput = ' ' if verbose else ' --silent '
userstr = uu.dav_u + ':' + uu.dav_p
command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
os.system(command)
except Exception:
return {}
if not pathexists(prod_json):
return {}
meta = loadjson(prod_json)
if remove:
os.remove(prod_json)
return meta
def getFeatures(url,clfinputs,product_type='interferogram'):
'''
retrieves feature vector for the given product url, provided clfinputs
'''
featurl = url2featid(url,product_type)
featdict = curlProductMeta(featurl)
fvec = fdict2vec(featdict,clfinputs)
return fvec
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
'''
builds/posts the faceted search query specified in querymeta and dumps the
result to queryoutfile. if queryoutfile already exists, the query is loaded from
disk rather than executed.
'''
if not cache or not pathexists(queryoutfile):
print('executing faceted search query...')
from utils.queryBuilder import postQuery, buildQuery
from utils.contextUtils import toContext
ret,status = postQuery(buildQuery(querymeta,queryoptions))
if cache and status:
# only dump the query if caching enabled and postQuery succeeds
with open(queryoutfile,'wb') as fid:
pickle.dump(ret,fid)
elif cache:
print('loading cached query from %s...'%queryoutfile)
with open(queryoutfile,'rb') as fid:
ret = pickle.load(fid)
print('query returned %d products'%len(ret))
return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
substitutes '_', for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = list(initialmap.keys())
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
json.dump(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
clfspec['clf_file'] = inputs['clf_file']
clfspec['classmap'] = inputs["classmap_file"]
clfspec['features'] = inputs("feat_file")
def usertags2label(usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in list(classmap.items()):
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery)
Keep only IGMs with tags present in classmap, and select/validate features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
if all([ret['tags'][i] == tags[i] for i in range(len(tags))]):
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
Keyword Arguments:
None
Returns:
- clf: tuned classifier
- cv: cross validation struct used to tune classifier
"""
uy = np.unique(y_train)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return None,None
model_id = clfinputs['clf_type']
if model_id == 'rf':
|
else:
print("invalid clf_type")
return {}
clf = clone(model_clf)
if model_tuned is not None and len(model_tuned) != 0 and \
len(model_tuned[0]) != 0:
cv = GridSearchCV(clf,model_tuned,cv=gridcv_folds,scoring=gridcv_score,
n_jobs=gridcv_jobs,verbose=gridcv_verbose,refit=True)
cv.fit(X_train, y_train)
clf = cv.best_estimator_
else: # no parameter tuning
clf.fit(X_train,y_train)
return clf,cv
def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):
"""
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log')
use cross validation to assess the quality of a specified classifier
Arguments:
- X: training data
- y: training labels
- clfinputs: dict of classifier inputs
Keyword Arguments:
- logfile: cross-validation outfile (default='cvout.log')
Returns:
- dict containing:
- models: model for each cross validation fold
- scores: scores for each fold according to each scorefn
- preds: predictions for each training sample
- errors: errors for each training sample according to each errorfn
- modelcvs: cross validation structure used to train each model
"""
models,modelcvs,preds,probs = [],[],[],[]
scores = dict([(key,[]) for key in list(scorefn.keys())])
errors = dict([(key,[]) for key in list(errorfn.keys())])
# validate class labels
uy = np.unique(y)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return {}
N,ymin = len(y),uy[0]
if cv_type == 'loo':
cv = KFold(N,n_folds=N,random_state=train_state)
y_pred = np.zeros(N)
y_prob = np.zeros(N)
else:
cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)
n_folds = len(cv)
model_id = clfinputs['clf_type']
widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()
with open(logfile,'w') as logfid:
cv_test_index = []
scorekeys = sorted(scores.keys())
for i,(train_index,test_index) in enumerate(cv):
pbar.update(i)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cv_test_index.extend(test_index)
# xgb assumes labels \in {0,1}
if model_id == 'xgb' and ymin == -1:
y_train[y_train==-1] = 0
# train/predict as usual
clf,clf_cv = train(X_train,y_train,clfinputs)
clf_pred = clf.predict(X_test)
if model_id == 'xgb' and ymin == -1:
clf_pred[clf_pred==0] = -1
if cv_probs:
clf_prob = clf.predict_proba(X_test)[:,0]
else:
clf_prob = np.ones(len(clf_pred))*np.nan
# loo predicts one label per 'fold'
if cv_type == 'loo':
y_pred[test_index] = clf_pred
y_prob[test_index] = clf_prob
# compute scores for the points we've classified thus far
y_test_cur = np.atleast_1d(y[cv_test_index])
y_pred_cur = np.atleast_1d(y_pred[cv_test_index])
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test_cur,y_pred_cur,uy)
scores[score] = [scorei]
else:
# collect output for all test samples in this fold
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test,clf_pred,uy)
scores[score].append(scorei)
preds.append(clf_pred)
probs.append(clf_prob)
models.append(clf)
modelcvs.append(clf_cv)
for error,error_fn in list(errorfn.items()):
errors[error].append(error_fn(y_test,clf_pred))
if i==0:
scorenames = ['%-16s'%score for score in scorekeys]
logstr = '%-8s %s'%('i',''.join(scorenames))
else:
curscores = ['%-16.4f'%(np.mean(scores[score]))
for score in scorekeys]
logstr = '%-8.3g %s'%(i,''.join(curscores))
print(logstr,file=logfid,flush=True)
# train full model for loo cv, score on loo preds from above
if cv_type == 'loo':
for score,score_fn in list(scorefn.items()):
scores[score] = [score_fn(y,y_pred,uy)]
for error,error_fn in list(errorfn.items()):
errors[error] = [error_fn(y,y_pred)]
clf,clf_cv = train(X,y,clfinputs)
models = [clf]
modelcvs = [clf_cv]
preds = [y_pred]
probs = [y_prob]
pbar.update(i+1)
pbar.finish()
# output scores ordered by key
for score_id in scorekeys:
score_vals = scores[score_id]
print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),
np.std(score_vals)))
return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,
'models':models,'modelcvs':modelcvs}
def trainPredictor(infile):
process = 'trainPredictor'
# fix the random seed to ensure reproducibility
np.random.seed(seed=train_state)
inputs = loadjson(infile)
outputs = {}
outbase = 'predictor%s'%mdy
cwd = os.getcwd()
try:
clfinputs = {}
clfinputs['clf_file'] = inputs['clf_name']+'.pkl'
clfinputs['clf_type'] = inputs['clf_type']
clfinputs['classmap'] = loadClassmap(inputs["classmap_file"])
clfinputs['features'] = loadjson(inputs["feat_file"])
inputurls = inputs.pop('urls',[])
crossvalidate = inputs.pop('crossvalidate',0)
saveclf = inputs.pop('saveclf',0)
cacheoutput = inputs.pop('cacheoutput',0)
if not pathexists(outbase):
os.mkdir(outbase)
if cacheoutput and not pathexists(pathjoin(outbase,cache_dir)):
os.mkdir(pathjoin(outbase,cache_dir))
os.chdir(outbase)
except Exception as e:
exitv = 10
message = 'IO Preprocessing failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
trdat = collectTrainingData(inputurls,clfinputs,cache=cacheoutput)
X, y = trdat['X'],trdat['y']
traintags, trainurls = trdat['traintags'],trdat['trainurls']
errors, skiplist = trdat['skiplist'],trdat['errors']
print('loaded %d training samples (%d skipped)'%(len(y),len(skiplist)))
except Exception as e:
exitv = 11
message = 'Training data collection failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if crossvalidate:
cvoutpkl = "cvout.pkl"
cvlogfile = 'cvout.log'
print('evaluating model via %s cross-validation (logfile=%s)...'%(cv_type,cvlogfile))
starttime = time.time()
cvout = crossValidatePredictor(X,y,clfinputs,logfile=cvlogfile)
outputs['cv_time'] = time.time()-starttime
outputs['cv_out'] = cvoutpkl
outputs['cv_log'] = cvlogfile
with open(cvoutpkl,'wb') as fid:
pickle.dump(cvout,fid)
print('done, output saved to %s.'%cvoutpkl)
except Exception as e:
exitv = 12
message = 'Cross-validation failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if saveclf:
starttime = time.time()
clf,clfcv = train(X,y,clfinputs)
clffile = clfinputs['clf_file']
if clffile[0] != '/':
clffile = pathjoin(cwd,clffile) # path relative to cwd
clfjson = clffile.replace('.pkl','.json')
outputs['clf_time'] = time.time()-starttime
outputs['clf_file'] = clffile
print("training classifier using all available data for deployment...")
with open(clffile,'wb') as fid:
pickle.dump(clf,fid)
with open(clfjson,'w') as fid:
json.dump(clfinputs,fid)
print('done, output saved to %s.'%clffile)
except Exception as e:
exitv = 13
message = 'Classifier training failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
json.dump(outputs,open(outbase+'.met.json','w'),indent=True)
except Exception:
os.chdir(cwd)
exitv = 14
message = 'Failed to create metadata file for ' + outbase
toContext(process,exitv,message)
sys.exit(1)
exitv = 0
os.chdir(cwd)
message = 'trainPredictor finished with no errors.'
toContext(process,exitv,message)
if __name__ == '__main__':
try: status = trainPredictor(sys.argv[1])
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status)
| model_clf = RandomForestClassifier(**rf_defaults)
model_tuned = [rf_tuned] | conditional_block |
trainPredictor.py | #!/usr/bin/env python3
from builtins import zip
from builtins import str
from builtins import range
import sys
import os
import json
import pickle
import traceback
import numpy as np
import time
import datetime as dtime
from progressbar import ProgressBar, ETA, Bar, Percentage
from sklearn.base import clone
from sklearn.preprocessing import MinMaxScaler
from sklearn.cross_validation import KFold, StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import precision_score, recall_score
from utils.UrlUtils import UrlUtils
#from utils.contextUtils import toContext
def toContext(process,exitv,message):
print(process,exitv,message)
pathjoin = os.path.join
pathexists = os.path.exists
mdy = dtime.datetime.now().strftime('%m%d%y')
product_type = 'interferogram'
cache_dir = 'cached'
train_folds = np.inf # inf = leave-one-out, otherwise k-fold cross validation
train_state = 42 # random seed
train_verbose = 0
train_jobs = -1
cv_type = 'loo' if train_folds==np.inf else '%d-fold'%train_folds
cv_probs = True # record prediction probabilities in addition to labels
scorefn = {} # map from name (e.g., mse) -> f(y_true,y_pred)
scorefn['precision'] = lambda te,pr,ul: precision_score(te,pr,labels=ul)
scorefn['recall'] = lambda te,pr,ul: recall_score(te,pr,labels=ul)
errorfn = {} # map from name (e.g., diff) -> f(y_true,y_pred)
errorfn['match'] = lambda y_true,y_pred: y_true==y_pred
# GRID SEARCH PARAMS FOR PARAMETER TUNING ######################################
gridcv_folds = 2 # number of cross-validation folds per gridcv parameter
gridcv_jobs = -1 # -1 = use all cores
gridcv_verbose = 0 # verbosity level of model-tuning cross-validation output
gridcv_score = 'roc_auc'
# SKLEARN MODEL SPECIFICATIONS #################################################
### Random Forest ##############################################################
rf_trees = 500
rf_feats = np.linspace(0.1,1.0,5)
rf_depth = [2,4,7,10,25]
rf_jobs = 1 if gridcv_jobs == -1 else -1 # multiprocessing + RandomForest don't play nice
rf_tuned = {'max_features':rf_feats,'max_depth':rf_depth}
rf_defaults = {
'n_estimators': rf_trees,'max_features':'sqrt','n_jobs':rf_jobs,
'verbose':train_verbose,'random_state':train_state,
'criterion':'gini','class_weight':'balanced_subsample'
}
### XGBoost ####################################################################
xgb_depth = [3,4,5,10,25]
xgb_subsample = np.linspace(0.1,1,5)
xgb_default = {
'n_estimators':rf_trees,'max_delta_step':1,'learning_rate':0.1,
'objective':'binary:logistic','max_depth':3,'subsample':0.5,
'colsample_bytree':1,'subsample':1,'silent':(not train_verbose),
'seed':train_state,'nthread':train_jobs
}
xgb_tuned = {'learning_rate':[0.001,0.01,0.05,0.1,0.25,0.33],
'max_depth':xgb_depth,'subsample':xgb_subsample}
def loadjson(jsonfile):
with open(jsonfile,'r') as fid:
return json.load(fid)
def dumpjson(objdict,jsonfile):
with open(jsonfile,'w') as fid:
return json.dump(fid,objdict)
def url2pid(url):
"""
url2pid(url): convert url to product id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns:
- product id for url
"""
if url.endswith('/'):
url = url[:-1]
urlsplit = url.split('/')
return (urlsplit[-2] + '_' + urlsplit[-1]).replace('__','_')
def url2featid(url,product_type):
"""
url2pid(url): convert url to feature id
Arguments:
- url: url to convert
Keyword Arguments:
None
Returns: | return url.replace(product_type,'features').replace('features__','features_'+product_type+'__')
def fdict2vec(featdict,clfinputs):
'''
extract feature vector from dict given classifier parameters
specifying which features to use
'''
fvec = []
try:
featspec = clfinputs['features']
featorder = featspec['feature_order']
featdims = featspec['feature_dims']
cohthr = featspec['cohthr10']
featscoh = featdict['%d'%cohthr]
for fid,fdim in zip(featorder,featdims):
flist = featscoh[fid]
if not isinstance(flist,list):
flist = [flist]
assert(len(flist) == fdim)
fvec.extend(flist)
except Exception:
pass
return fvec
def curlProductMeta(prod_url,verbose=False,remove=True):
"""
curlProductMeta(prod_url,verbose=False)
Arguments:
- prod_url: product url
Keyword Arguments:
- verbose: verbose output (default=False)
Returns: metadata dict from product .met.json
"""
if prod_url.endswith('/'):
prod_url = prod_url[:-1]
prod_json = url2pid(prod_url) + '.met.json'
try:
uu = UrlUtils()
silentoutput = ' ' if verbose else ' --silent '
userstr = uu.dav_u + ':' + uu.dav_p
command = 'curl' + silentoutput + '-k -f -u' + userstr + ' -O ' + pathjoin(prod_url,prod_json)
os.system(command)
except Exception:
return {}
if not pathexists(prod_json):
return {}
meta = loadjson(prod_json)
if remove:
os.remove(prod_json)
return meta
def getFeatures(url,clfinputs,product_type='interferogram'):
'''
retrieves feature vector for the given product url, provided clfinputs
'''
featurl = url2featid(url,product_type)
featdict = curlProductMeta(featurl)
fvec = fdict2vec(featdict,clfinputs)
return fvec
def loadQuery(querymeta,queryoptions=[],queryoutfile=None,cache=False):
'''
builds/posts the faceted search query specified in querymeta and dumps the
result to queryoutfile. if queryoutfile already exists, the query is loaded from
disk rather than executed.
'''
if not cache or not pathexists(queryoutfile):
print('executing faceted search query...')
from utils.queryBuilder import postQuery, buildQuery
from utils.contextUtils import toContext
ret,status = postQuery(buildQuery(querymeta,queryoptions))
if cache and status:
# only dump the query if caching enabled and postQuery succeeds
with open(queryoutfile,'wb') as fid:
pickle.dump(ret,fid)
elif cache:
print('loading cached query from %s...'%queryoutfile)
with open(queryoutfile,'rb') as fid:
ret = pickle.load(fid)
print('query returned %d products'%len(ret))
return ret
def loadClassmap(cmapjson):
"""
loadClassmap(cmapjson) - loads classmap file,
substitutes '_', for '-' as necessary
Arguments:
- cmapjson: classmap .json file
Keyword Arguments:
None
Returns: classmap with substitutions
"""
initialmap = loadjson(cmapjson)
classmap = initialmap.copy()
# substitute '-' with '_' (for user-tagged typos)
tags = list(initialmap.keys())
for tag in tags:
if '-' in tag:
classmap[tag.replace('-','_')] = classmap[tag]
return classmap
def loadPredictorSpec(clfjson):
"""
loadPredictorSpec(clfjson)
Arguments:
- clfjson: json file specifying classifier parameters
Keyword Arguments:
None
Returns: dict containing classifier parameters,
including (but not limited to):
- classmap: classmap to map user tags to labels
- features: dict containing information about features used to train classifier
"""
clfspec = loadjson(clfjson)
clfspec['classmap'] = loadClassmap(clfspec["classmap_file"])
clfspec['features'] = loadjson(clfspec["feat_file"])
return clfspec
def dumpPredictorSpec(inputs):
clfspec = {}
clfspec['clf_file'] = inputs['clf_name']+'.pkl'
for key in ['clf_type','classmap','feat_file']:
clfspec[key] = inputs[key]
json.dump(clfspec,inputs['clf_name']+'.json')
def PredictorSpec(inputjson):
clfspec['clf_file'] = inputs['clf_file']
clfspec['classmap'] = inputs["classmap_file"]
clfspec['features'] = inputs("feat_file")
def usertags2label(usertags,classmap):
'''
return dictionary of matched (tag,label) pairs in classmap for all tags
returns {} if none of the tags are present in classmap
'''
labelmap = {}
for tag in usertags:
tag = tag.strip()
for k,v in list(classmap.items()):
if tag.count(k):
labelmap[tag] = v
return labelmap
def queryAllTags(taglist,cache=False):
'''
return all urls with user tags present in taglist
'''
tagpkl = pathjoin(cache_dir,"usertags.pkl")
tagquery = {'dataset_type':product_type,'tags':taglist}
querylist = loadQuery(tagquery,cache=cache,queryoutfile=tagpkl)
querydict = {}
for product in querylist:
purl = product['url']
querydict[purl] = product
return querydict
def collectUrlTags(urllist,querymeta={}):
"""
collectUrlTags(urllist,querymeta={})
collects user tags for a list of urls
Arguments:
- urllist: list of urls
Keyword Arguments:
- querymeta: (default={})
Returns: dict keyed on product id containing
- url: input url
- user_tags: tags for input url
"""
tagdict = {}
nurl = len(urllist)
for i,url in enumerate(urllist):
if url in querymeta: # use the query input if possible
meta = querymeta[url]
else: # otherwise retrieve product metadata via curl
meta = curlProductMeta(url)
tagdict[url2pid(url)] = {'url':url,'user_tags':meta.get('user_tags',[])}
return tagdict
def collectTrainingData(urls,clfinputs,cache=False):
'''
construct matrix of training samples X with labels y by intersecting the set of
IGMs with extracted features (featquery) with the set of tagged IGMs (taggedquery)
Keep only IGMs with tags present in classmap, and select/validate features
according to the parameters in clfinputs.
Returns: dict containing:
- tags: list of user tags used to select training samples
- X, y: training samples, labels
- traintags: tags for each training sample
- trainurls: url for each training sample
- skiplist: list of urls which could not be retrieved due to errors
- errors: list of error strings for each url in skiplist
'''
classmap = clfinputs['classmap']
tags = sorted(list(classmap.keys()))
traindatpkl = pathjoin(cache_dir,"traindat.pkl")
if cache and pathexists(traindatpkl):
print('loading training data from %s...'%traindatpkl)
with open(traindatpkl,'rb') as fid:
ret = pickle.load(fid)
# make sure the set of tags match
if all([ret['tags'][i] == tags[i] for i in range(len(tags))]):
return ret
print("querying %d tags"%len(tags))
querymeta = queryAllTags(tags,cache=cache)
if len(urls)==0:
print('no URLs provided, training using all tags in classmap')
# construct/run query to get metadata for all products with given tags
urls = list(querymeta.keys())
elif isinstance(urls,str):
urls = [urls]
tagdict = collectUrlTags(urls,querymeta=querymeta)
ntagged = len(tagdict)
X,y = [],[]
traintags,trainurls = [],[]
errors,skiplist = [],[]
widgets = ['collecting features for %d products'%ntagged, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=ntagged).start()
for i,pid in enumerate(tagdict):
tdict = tagdict[pid]
turl,ttags = tdict['url'],tdict['user_tags']
taglabel = usertags2label(ttags,classmap)
if len(taglabel) == 0:
continue
fvec = getFeatures(turl,clfinputs)
if len(fvec)==0:
errmsg = "error collecting features for product %s (skipped)"%pid
errors.append(errmsg)
skiplist.append(turl)
continue
pidtags,pidlabs = list(taglabel.keys()),list(taglabel.values())
if len(pidtags) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
elif len(pidtags) > 1:
ulab = np.unique(pidlabs)
if len(ulab) == 1:
X.append(fvec)
y.append(pidlabs[0])
traintags.append(pidtags[0])
trainurls.append(turl)
else:
errmsg = "conflicting tags (%s) for product %s, skipped"%(pidtags,pid)
errors.append(errmsg)
skiplist.append(turl)
pbar.update(i)
pbar.finish()
# sort products by product url to ensure identical ordering of X,y
sorti = np.argsort(trainurls)
print('collected', len(sorti), 'training samples (skipped %d)'%len(skiplist))
X,y = np.array(X)[sorti,:],np.array(y)[sorti]
traintags,trainurls = np.array(traintags)[sorti],np.array(trainurls)[sorti]
ret = {'tags':tags,'X':X,'y':y,'traintags':traintags,'trainurls':trainurls,
'skiplist':skiplist,'errors':errors}
if cache:
with open(traindatpkl,'wb') as fid:
pickle.dump(ret,fid)
print('saved training data to %s'%traindatpkl)
return ret
def train(X_train,y_train,clfinputs,**kwargs):
"""
train(X_train,y_train,clfinputs,**kwargs)
train a classifier with parameter tuning via gridsearchcv
Arguments:
- X_train: training data (N x n matrix)
- y_train: training labels (N x 1 vector)
- clfinputs: classifier spec
Keyword Arguments:
None
Returns:
- clf: tuned classifier
- cv: cross validation struct used to tune classifier
"""
uy = np.unique(y_train)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return None,None
model_id = clfinputs['clf_type']
if model_id == 'rf':
model_clf = RandomForestClassifier(**rf_defaults)
model_tuned = [rf_tuned]
else:
print("invalid clf_type")
return {}
clf = clone(model_clf)
if model_tuned is not None and len(model_tuned) != 0 and \
len(model_tuned[0]) != 0:
cv = GridSearchCV(clf,model_tuned,cv=gridcv_folds,scoring=gridcv_score,
n_jobs=gridcv_jobs,verbose=gridcv_verbose,refit=True)
cv.fit(X_train, y_train)
clf = cv.best_estimator_
else: # no parameter tuning
clf.fit(X_train,y_train)
return clf,cv
def crossValidatePredictor(X,y,clfinputs,logfile='cvout.log'):
"""
crossValidatePredictor(X,y,clfinputs,logfile='cvout.log')
use cross validation to assess the quality of a specified classifier
Arguments:
- X: training data
- y: training labels
- clfinputs: dict of classifier inputs
Keyword Arguments:
- logfile: cross-validation outfile (default='cvout.log')
Returns:
- dict containing:
- models: model for each cross validation fold
- scores: scores for each fold according to each scorefn
- preds: predictions for each training sample
- errors: errors for each training sample according to each errorfn
- modelcvs: cross validation structure used to train each model
"""
models,modelcvs,preds,probs = [],[],[],[]
scores = dict([(key,[]) for key in list(scorefn.keys())])
errors = dict([(key,[]) for key in list(errorfn.keys())])
# validate class labels
uy = np.unique(y)
if len(uy) != 2:
print('error: need 2 classes for classification!')
return {}
N,ymin = len(y),uy[0]
if cv_type == 'loo':
cv = KFold(N,n_folds=N,random_state=train_state)
y_pred = np.zeros(N)
y_prob = np.zeros(N)
else:
cv = StratifiedKFold(y,n_folds=train_folds,random_state=train_state)
n_folds = len(cv)
model_id = clfinputs['clf_type']
widgets = ['%s cv: '%cv_type, Percentage(), ' ', Bar('='), ' ', ETA()]
pbar = ProgressBar(widgets=widgets, maxval=n_folds+(cv_type=='loo')).start()
with open(logfile,'w') as logfid:
cv_test_index = []
scorekeys = sorted(scores.keys())
for i,(train_index,test_index) in enumerate(cv):
pbar.update(i)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cv_test_index.extend(test_index)
# xgb assumes labels \in {0,1}
if model_id == 'xgb' and ymin == -1:
y_train[y_train==-1] = 0
# train/predict as usual
clf,clf_cv = train(X_train,y_train,clfinputs)
clf_pred = clf.predict(X_test)
if model_id == 'xgb' and ymin == -1:
clf_pred[clf_pred==0] = -1
if cv_probs:
clf_prob = clf.predict_proba(X_test)[:,0]
else:
clf_prob = np.ones(len(clf_pred))*np.nan
# loo predicts one label per 'fold'
if cv_type == 'loo':
y_pred[test_index] = clf_pred
y_prob[test_index] = clf_prob
# compute scores for the points we've classified thus far
y_test_cur = np.atleast_1d(y[cv_test_index])
y_pred_cur = np.atleast_1d(y_pred[cv_test_index])
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test_cur,y_pred_cur,uy)
scores[score] = [scorei]
else:
# collect output for all test samples in this fold
for score,score_fn in list(scorefn.items()):
scorei = score_fn(y_test,clf_pred,uy)
scores[score].append(scorei)
preds.append(clf_pred)
probs.append(clf_prob)
models.append(clf)
modelcvs.append(clf_cv)
for error,error_fn in list(errorfn.items()):
errors[error].append(error_fn(y_test,clf_pred))
if i==0:
scorenames = ['%-16s'%score for score in scorekeys]
logstr = '%-8s %s'%('i',''.join(scorenames))
else:
curscores = ['%-16.4f'%(np.mean(scores[score]))
for score in scorekeys]
logstr = '%-8.3g %s'%(i,''.join(curscores))
print(logstr,file=logfid,flush=True)
# train full model for loo cv, score on loo preds from above
if cv_type == 'loo':
for score,score_fn in list(scorefn.items()):
scores[score] = [score_fn(y,y_pred,uy)]
for error,error_fn in list(errorfn.items()):
errors[error] = [error_fn(y,y_pred)]
clf,clf_cv = train(X,y,clfinputs)
models = [clf]
modelcvs = [clf_cv]
preds = [y_pred]
probs = [y_prob]
pbar.update(i+1)
pbar.finish()
# output scores ordered by key
for score_id in scorekeys:
score_vals = scores[score_id]
print('mean %s: %7.4f (std=%7.4f)'%(score_id, np.mean(score_vals),
np.std(score_vals)))
return {'preds':preds,'probs':probs,'scores':scores,'errors':errors,
'models':models,'modelcvs':modelcvs}
def trainPredictor(infile):
process = 'trainPredictor'
# fix the random seed to ensure reproducibility
np.random.seed(seed=train_state)
inputs = loadjson(infile)
outputs = {}
outbase = 'predictor%s'%mdy
cwd = os.getcwd()
try:
clfinputs = {}
clfinputs['clf_file'] = inputs['clf_name']+'.pkl'
clfinputs['clf_type'] = inputs['clf_type']
clfinputs['classmap'] = loadClassmap(inputs["classmap_file"])
clfinputs['features'] = loadjson(inputs["feat_file"])
inputurls = inputs.pop('urls',[])
crossvalidate = inputs.pop('crossvalidate',0)
saveclf = inputs.pop('saveclf',0)
cacheoutput = inputs.pop('cacheoutput',0)
if not pathexists(outbase):
os.mkdir(outbase)
if cacheoutput and not pathexists(pathjoin(outbase,cache_dir)):
os.mkdir(pathjoin(outbase,cache_dir))
os.chdir(outbase)
except Exception as e:
exitv = 10
message = 'IO Preprocessing failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
trdat = collectTrainingData(inputurls,clfinputs,cache=cacheoutput)
X, y = trdat['X'],trdat['y']
traintags, trainurls = trdat['traintags'],trdat['trainurls']
errors, skiplist = trdat['skiplist'],trdat['errors']
print('loaded %d training samples (%d skipped)'%(len(y),len(skiplist)))
except Exception as e:
exitv = 11
message = 'Training data collection failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if crossvalidate:
cvoutpkl = "cvout.pkl"
cvlogfile = 'cvout.log'
print('evaluating model via %s cross-validation (logfile=%s)...'%(cv_type,cvlogfile))
starttime = time.time()
cvout = crossValidatePredictor(X,y,clfinputs,logfile=cvlogfile)
outputs['cv_time'] = time.time()-starttime
outputs['cv_out'] = cvoutpkl
outputs['cv_log'] = cvlogfile
with open(cvoutpkl,'wb') as fid:
pickle.dump(cvout,fid)
print('done, output saved to %s.'%cvoutpkl)
except Exception as e:
exitv = 12
message = 'Cross-validation failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
if saveclf:
starttime = time.time()
clf,clfcv = train(X,y,clfinputs)
clffile = clfinputs['clf_file']
if clffile[0] != '/':
clffile = pathjoin(cwd,clffile) # path relative to cwd
clfjson = clffile.replace('.pkl','.json')
outputs['clf_time'] = time.time()-starttime
outputs['clf_file'] = clffile
print("training classifier using all available data for deployment...")
with open(clffile,'wb') as fid:
pickle.dump(clf,fid)
with open(clfjson,'w') as fid:
json.dump(clfinputs,fid)
print('done, output saved to %s.'%clffile)
except Exception as e:
exitv = 13
message = 'Classifier training failed with exception %s: %s' % (str(e), traceback.format_exc())
toContext(process,exitv,message)
sys.exit(1)
try:
json.dump(outputs,open(outbase+'.met.json','w'),indent=True)
except Exception:
os.chdir(cwd)
exitv = 14
message = 'Failed to create metadata file for ' + outbase
toContext(process,exitv,message)
sys.exit(1)
exitv = 0
os.chdir(cwd)
message = 'trainPredictor finished with no errors.'
toContext(process,exitv,message)
if __name__ == '__main__':
try: status = trainPredictor(sys.argv[1])
except Exception as e:
with open('_alt_error.txt', 'w') as f:
f.write("%s\n" % str(e))
with open('_alt_traceback.txt', 'w') as f:
f.write("%s\n" % traceback.format_exc())
raise
sys.exit(status) | - feature id for url
""" | random_line_split |
photcalibration.py | #from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib
from longtermphotzp import photdbinterface
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import argparse
import re
import glob
import os
import math
import sys
import logging
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
import datetime
_logger = logging.getLogger(__name__)
__author__ = 'dharbeck'
class PhotCalib():
# To be replaced with map:
# LCO filter -> sdss filter name, sdsss g-i color term, airmass term, default zero point.
# possibly need this for all site:telescope:camera:filter settings. Maybe start with one
# default and see where it goes.
ps1catalog = None
def __init__(self, ps1dir):
# super(PhotCalib, self).__init__(pipeline_context)
# TODO: capture error if no ps1 catalog is found at location
self.ps1catalog = PS1IPP(ps1dir)
def do_stage(self, images):
""" future hook for BANZAI pipeline integration
"""
for i, image in enumerate(images):
pass
# logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
def generateCrossmatchedCatalog(self, image, mintexp=60):
""" Load the banzai-generated photometry catalog from 'CAT' extension, queries PS1 catalog for image FoV, and
returns a cross-matched catalog.
Errors conditions:
if photometricfilter is not supported, none is returned
if exposure time is < 60 seconds, None is returned.
:param image: input fits image path+name
:return:
"""
# Build up a baseline refernce catlog with meta data
retCatalog = {'fname': image,
'instmag': None
}
# Read banzai star catalog
testimage = fits.open(image)
# Boilerplate grab of status information
ra = testimage['SCI'].header['CRVAL1']
dec = testimage['SCI'].header['CRVAL2']
if not self.ps1catalog.isInCatalogFootprint(ra, dec):
_logger.debug("Image not in PS1 footprint. Ignoring")
testimage.close()
return None
retCatalog['exptime'] = testimage['SCI'].header['EXPTIME']
retCatalog['instfilter'] = testimage['SCI'].header['FILTER']
retCatalog['airmass'] = testimage['SCI'].header['AIRMASS']
retCatalog['dateobs'] = testimage['SCI'].header['DATE-OBS']
retCatalog['instrument'] = testimage['SCI'].header['INSTRUME']
retCatalog['siteid'] = testimage['SCI'].header['SITEID']
retCatalog['domid'] = testimage['SCI'].header['ENCID']
retCatalog['telescope'] = testimage['SCI'].header['TELID']
retCatalog['FOCOBOFF'] = testimage['SCI'].header['FOCOBOFF']
# Check if filter is supported
if retCatalog['instfilter'] not in self.ps1catalog.FILTERMAPPING:
_logger.debug("%s - Filter %s not viable for photometric calibration. Sorry" % (image, retCatalog['instfilter']))
testimage.close()
return None
# Check if exposure time is long enough
if (retCatalog['exptime'] < mintexp):
_logger.debug("%s - Exposure %s time is deemed too short, ignoring" % (image, retCatalog['exptime']))
testimage.close()
return None
# verify there is no deliberate defocus
if (retCatalog['FOCOBOFF'] is not None) and (retCatalog['FOCOBOFF'] != 0):
_logger.debug("%s - Exposure is deliberately defocussed by %s, ignoring" % (image, retCatalog['FOCOBOFF']))
testimage.close()
return None
# Get the instrumental filter and the matching reference catalog filter names.
referenceInformation = self.ps1catalog.FILTERMAPPING[retCatalog['instfilter']]
referenceFilterName = referenceInformation['refMag']
# Load photometry catalog from image, and transform into RA/Dec coordinates
try:
instCatalog = testimage['CAT'].data
except:
_logger.warning("%s - No extension \'CAT\' available, skipping." % (image))
testimage.close()
return None
# Transform the image catalog to RA / Dec based on the WCS solution in the header.
# TODO: rerun astrometry.net with a higher order distortion model
image_wcs = WCS(testimage['SCI'].header)
try:
ras, decs = image_wcs.all_pix2world(instCatalog['x'], instCatalog['y'], 1)
except:
_logger.error("%s: Failed to convert images coordinates to world coordinates. Giving up on file." % (image))
testimage.close()
return None
# Now we have all we wanted from the input image, close it
testimage.close()
# Query reference catalog TODO: paramterize FoV of query!
refcatalog = self.ps1catalog.get_reference_catalog(ra, dec, 0.25)
if refcatalog is None:
_logger.warning("%s, no reference catalog received." % image)
return None
# Start the catalog matching, using astropy skycoords built-in functions.
cInstrument = SkyCoord(ra=ras * u.degree, dec=decs * u.degree)
cReference = SkyCoord(ra=refcatalog['RA'] * u.degree, dec=refcatalog['DEC'] * u.degree)
idx, d2d, d3d = cReference.match_to_catalog_sky(cInstrument)
# Reshuffle the source catalog to index-match the reference catalog.
# There is probably as smarter way of doing this!
instCatalogT = np.transpose(instCatalog)[idx]
instCatalog = np.transpose(instCatalogT)
# Measure the distance between matched pairs. Important to down-select viable pairs.
distance = cReference.separation(cInstrument[idx]).arcsecond
# Define a reasonable condition on what is a good match on good photometry
condition = (distance < 5) & (instCatalog['FLUX'] > 0) & (refcatalog[referenceFilterName] > 0) & (
refcatalog[referenceFilterName] < 26)
# Calculate instrumental magnitude from PSF instrument photometry
instmag = -2.5 * np.log10(instCatalog['FLUX'][condition] / retCatalog['exptime'])
# Calculate the magnitude difference between reference and inst catalog
retCatalog['instmag'] = instmag
retCatalog['refcol'] = (refcatalog['g'] - refcatalog['i'])[condition]
retCatalog['refmag'] = refcatalog[referenceFilterName][condition]
retCatalog['ra'] = refcatalog['RA'][condition]
retCatalog['dec'] = refcatalog['DEC'][condition]
retCatalog['matchDistance'] = distance[condition]
# TODO: Read photometric error columns from reference and instrument catalogs, properly propagate error.
return retCatalog
def reject_outliers(self, data, m=2):
"""
Reject data from vector that are > m std deviations from median
:param m:
:return:
"""
std = np.std(data)
return data[abs(data - np.median(data)) < m * std]
def analyzeImage(self, imageName, outputdb= None,
outputimageRootDir=None, mintexp=60):
""" Do full photometric zeropoint analysis on an image
"""
# TODO: Make this procedure thread safe so it can be accelerated a bit.
retCatalog = self.generateCrossmatchedCatalog(imageName, mintexp=mintexp)
if (retCatalog is None) or (retCatalog['instmag'] is None) or (len(retCatalog['ra']) < 10):
if retCatalog is None:
return
if len(retCatalog['ra']) < 10:
_logger.info ("%s: Catalog returned, but is has less than 10 stars. Ignoring. " % (imageName))
return
# calculate the per star zeropoint
magZP = retCatalog['refmag'] - retCatalog['instmag']
refmag = retCatalog['refmag']
refcol = retCatalog['refcol']
# Calculate the photometric zeropoint.
# TODO: Robust median w/ rejection, error propagation.
cleandata = self.reject_outliers(magZP, 3)
photzp = np.median(cleandata)
photzpsig = np.std(cleandata)
# calculate color term
try:
cond = (refcol > 0) & (refcol < 3) & (np.abs(magZP - photzp) < 0.75)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
delta = np.abs(magZP - photzp - color_p(refcol))
cond = (delta < 0.2)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
colorterm = colorparams[0]
except:
_logger.warning("could not fit a color term. ")
color_p = None
colorterm = 0
# if requested, generate all sorts of diagnostic plots
if (outputimageRootDir is not None) and (os.path.exists(outputimageRootDir)):
outbasename = os.path.basename(imageName)
outbasename = re.sub('.fits.fz', '', outbasename)
### Zeropoint plot
plt.figure()
plt.plot(refmag, magZP, '.')
plt.xlim([10, 22])
plt.ylim([photzp-0.5, photzp+0.5])
plt.axhline(y=photzp, color='r', linestyle='-')
plt.xlabel("Reference catalog mag")
plt.ylabel("Reference Mag - Instrumnetal Mag (%s)" % (retCatalog['instfilter']))
plt.title("Photometric zeropoint %s %5.2f" % (outbasename, photzp))
plt.savefig("%s/%s_%s_zp.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
### Color term plot
plt.figure()
plt.plot(refcol, magZP - photzp, '.')
if color_p is not None:
xp = np.linspace(-0.5, 3.5, 10)
plt.plot(xp, color_p(xp), '-', label="color term fit % 6.4f" % (colorterm))
plt.legend()
plt.xlim ([-0.5, 3.0])
plt.ylim ([-1,1])
plt.xlabel("(g-r)$_{\\rm{SDSS}}$ Reference")
plt.ylabel("Reference Mag - Instrumnetal Mag - ZP (%5.2f) %s" % (photzp, retCatalog['instfilter']))
plt.title("Color correction %s " % (outbasename))
plt.savefig("%s/%s_%s_color.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
# TODO: Make this thread safe, e.g., write to transactional database, or return values for storing externally.
if outputdb is not None:
outputdb.addphotzp ( (imageName, retCatalog['dateobs'].replace('T', ' '), retCatalog['siteid'], retCatalog['domid'],
retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
retCatalog['airmass'], photzp, colorterm, photzpsig))
else:
_logger.info ("Not safing output for image %s " % imageName)
# with open(pickle, 'a') as f:
# output = "%s %s %s %s %s %s %s %s % 6.3f % 6.3f % 6.3f\n" % (
# imageName, retCatalog['dateobs'], retCatalog['siteid'], retCatalog['domid'],
# retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
# retCatalog['airmass'], photzp, colorterm, photzpsig)
# _logger.info(output)
# f.write(output)
# f.close()
return photzp
class PS1IPP:
""" Class to access local, distilled copy of PS1 data release.
Based on code from WIYN ODI quickreduce pipeline, developed by Ralf Kotula. See:
https://github.com/WIYN-ODI/QuickReduce
"""
FILTERMAPPING = {}
FILTERMAPPING['gp'] = {'refMag': 'g', 'colorTerm': 0.0, 'airmassTerm': 0.20, 'defaultZP': 0.0}
FILTERMAPPING['rp'] = {'refMag': 'r', 'colorTerm': 0.0, 'airmassTerm': 0.12, 'defaultZP': 0.0}
FILTERMAPPING['ip'] = {'refMag': 'i', 'colorTerm': 0.0, 'airmassTerm': 0.08, 'defaultZP': 0.0}
FILTERMAPPING['zp'] = {'refMag': 'z', 'colorTerm': 0.0, 'airmassTerm': 0.05, 'defaultZP': 0.0}
### PS to SDSS color transformations according to Finkbeiner 2016
### http://iopscience.iop.org/article/10.3847/0004-637X/822/2/66/meta#apj522061s2-4 Table 2
### Note that this transformation is valid for stars only. For the purpose of photometric
### calibration, it is desirable to select point sources only from the input catalog.
## Why reverse the order of the color term entries? Data are entered in the order as they are
## shown in paper. Reverse after the fact to avoid confusion when looking at paper
ps1colorterms = {}
ps1colorterms['g'] = [-0.01808, -0.13595, +0.01941, -0.00183][::-1]
ps1colorterms['r'] = [-0.01836, -0.03577, +0.02612, -0.00558][::-1]
ps1colorterms['i'] = [+0.01170, -0.00400, +0.00066, -0.00058][::-1]
ps1colorterms['z'] = [-0.01062, +0.07529, -0.03592, +0.00890][::-1]
def __init__(self, basedir):
self.basedir = basedir
self.skytable = None
def PS1toSDSS(self, table):
"""
Modify table in situ from PS1 to SDSS, requires column names compatible with ps1colorterms definition.
:param table:
:return: modified table.
"""
if table is not None:
pscolor = table['g'] - table['i']
for filter in self.ps1colorterms:
colorcorrection = np.polyval(self.ps1colorterms[filter], pscolor)
table[filter] -= colorcorrection
return table
def isInCatalogFootprint(self, ra, dec):
""" Verify if image is in catalog footprint.
TODO: Account for image field of view
"""
# PanSTARRS has valid entries for DEc > - 30 degrees
return dec >= -30.0
def get_reference_catalog(self, ra, dec, radius, overwrite_select=False):
""" Read i fits table from local catalog copy. Concatenate tables columns
from different fits tables for full coverage.
"""
# A lot of safeguarding boiler plate to ensure catalog files are valid.
if (self.basedir is None) or (not os.path.isdir(self.basedir)):
_logger.error("Unable to find reference catalog: %s" % (str(self.basedir)))
return None
# Load the SkyTable so we know in what files to look for the catalog"
_logger.debug("Using catalog found in %s" % (self.basedir))
skytable_filename = "%s/SkyTable.fits" % (self.basedir)
if (not os.path.isfile(skytable_filename)):
_logger.fatal("Unable to find catalog index file in %s!" % (self.basedir))
return None
# Read in the master index hdu
skytable_hdu = fits.open(skytable_filename)
skytable = skytable_hdu['SKY_REGION'].data
# Select entries that match our list
# print ra, dec, radius, type(ra), type(dec), type(radius)
# logger.debug("# Searching for stars within %.1f degress around %f , %f ..." % (radius, ra, dec))
if (not radius == None and radius > 0):
min_dec = dec - radius
max_dec = dec + radius
min_ra = ra - radius / math.cos(math.radians(dec))
max_ra = ra + radius / math.cos(math.radians(dec))
else:
min_dec, max_dec = dec[0], dec[1]
min_ra, max_ra = ra[0], ra[1]
_logger.debug("Querying catalog: Ra=%f...%f Dec=%f...%f" % (min_ra, max_ra, min_dec, max_dec))
if (max_ra > 360.):
# This wraps around the high end, shift all ra values by -180
# Now all search RAs are ok and around the 180, next also move the catalog values
selected = skytable['R_MIN'] < 180
skytable['R_MAX'][selected] += 360
skytable['R_MIN'][selected] += 360
if (min_ra < 0):
# Wrap around at the low end
selected = skytable['R_MAX'] > 180
skytable['R_MAX'][selected] -= 360
skytable['R_MIN'][selected] -= 360
_logger.debug("# Search radius: RA=%.1f ... %.1f DEC=%.1f ... %.1f" % (min_ra, max_ra, min_dec, max_dec))
try:
needed_catalogs = (skytable['PARENT'] > 0) & (skytable['PARENT'] < 25) & \
(skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
except KeyError:
# try without the PARENT field
needed_catalogs = (skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
# print skytable[needed_catalogs]
files_to_read = skytable['NAME'][needed_catalogs]
files_to_read = [f.strip() for f in files_to_read]
_logger.debug(files_to_read)
skytable_hdu.close() # Warning: might erase the loaded data, might need to copy array!
# Now quickly go over the list and take care of all filenames that still have a 0x00 in them
for i in range(len(files_to_read)):
found_at = files_to_read[i].find('\0')
if (found_at > 0):
files_to_read[i] = files_to_read[i][:found_at]
# Load all frames, one by one, and select all stars in the valid range.
# Then add them to the catalog with RAs and DECs
full_catalog = None # numpy.zeros(shape=(0,6))
catalog_filenames = []
# Start iterating though catalogs
for catalogname in files_to_read:
catalogfile = "%s/%s" % (self.basedir, catalogname)
# print catalogfile
if (not os.path.isfile(catalogfile)):
# not a file, try adding .fits to the end of the filename
if (os.path.isfile(catalogfile + ".fits")):
catalogfile += ".fits"
else:
# neither option (w/ or w/o .fits added is a file)
_logger.warning(
"Catalog file (%s) not found (base-dir: %s)" % (os.path.abspath(catalogfile), self.basedir))
continue
try:
hdu_cat = fits.open(catalogfile)
except:
_logger.warning("Unable to open catalog file %s" % (catalogfile))
continue
catalog_filenames.append(catalogfile)
_logger.debug("Adding %s to list of catalog files being used" % (catalogfile))
# read table into a nd-array buffer
cat_full = hdu_cat[1].data
hdu_cat.close()
# Read the RA and DEC values
cat_ra = cat_full['RA']
cat_dec = cat_full['DEC']
# To select the right region, shift a temporary catalog
cat_ra_shifted = cat_ra
if (max_ra > 360.):
cat_ra_shifted[cat_ra < 180] += 360
elif (min_ra < 0):
cat_ra_shifted[cat_ra > 180] -= 360
select_from_cat = (cat_ra_shifted > min_ra) & (cat_ra_shifted < max_ra) & (cat_dec > min_dec) & (
cat_dec < max_dec)
array_to_add = cat_full[select_from_cat]
_logger.debug("Read %d sources from %s" % (array_to_add.shape[0], catalogname))
if (full_catalog is None):
full_catalog = array_to_add
else:
full_catalog = np.append(full_catalog, array_to_add, axis=0)
# print photom_grizy[:3,:]
if (full_catalog is None):
_logger.warning("No stars found in area %s, %s from catalog %s" % (
str(ra), str(dec),
# ra[0], ra[1], dec[0], dec[1],
self.basedir))
else:
_logger.debug(
"Read a total of %d stars from %d catalogs!" % (full_catalog.shape[0], len(files_to_read)))
self.PS1toSDSS(full_catalog)
return full_catalog
#### Wrapper routines to use photometric zeropointing stand-alone
def crawlDirectory(directory, db, args):
|
def crawlSiteCameraArchive(site, camera, args, date=None):
'''
Process in the archive
:param site:
:param camera:
:param args:
:param date:
:return:
'''
if date is None:
date = '*'
if site is None:
_logger.error ("Must define a site !")
exit (1)
imagedb = photdbinterface(args.imagedbPrefix)
searchdir = "%s/%s/%s/%s/%s" % (args.rootdir, site, camera, date, args.processstatus)
# search = "%s/%s/%s/%s/preview/*-[es]11.fits.fz" % (args.rootdir, site, camera, date)
_logger.info("Searching in directories: %s" % (searchdir))
crawlDirectory(searchdir, imagedb, args)
imagedb.close();
def crawlSite(site, type, args):
""" Search for all cameras of a given type (fl, kb,fs) in a site directory and processed them """
searchdir = "%s/%s/%s*" % (args.rootdir, site, type)
cameralist = glob.glob(searchdir)
cameras = []
for candidate in cameralist:
cameras.append((site, os.path.basename(os.path.normpath(candidate))))
for setup in cameras:
for date in args.date:
print(setup[0], setup[1], date)
crawlSiteCameraArchive(setup[0], setup[1], args, date)
def parseCommandLine():
""" Read command line parameters
"""
parser = argparse.ArgumentParser(
description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')
parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],
help='Set the log level')
parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',
help='Directory of PS1 catalog')
parser.add_argument("--diagnosticplotsdir", dest='outputimageRootDir', default=None,
help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')
parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',
help='Result output directory. .db file is written here')
parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',
help="LCO archive root directory")
parser.add_argument('--site', dest='site', default=None, help='sites code for camera')
parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')
parser.add_argument('--redo', action='store_true')
parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')
mutex = parser.add_mutually_exclusive_group()
mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')
mutex.add_argument('--lastNdays', type=int)
cameragroup = parser.add_mutually_exclusive_group()
cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')
cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],
help='camera type to process at selected sites to process. ')
cameragroup.add_argument('--crawldirectory', default=None, type=str,
help="process all reduced image in specific directoy")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')
args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)
if args.outputimageRootDir is not None:
args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)
print ("Writing db to directory: %s" % args.outputimageRootDir)
if args.crawldirectory is not None:
args.crawldirectory = os.path.expanduser(args.crawldirectory)
if (args.lastNdays is not None):
args.date=[]
today = datetime.datetime.utcnow()
for ii in range (args.lastNdays):
day = today - datetime.timedelta(days=ii)
args.date.append (day.strftime("%Y%m%d"))
args.date = args.date[::-1]
args.ps1dir = os.path.expanduser(args.ps1dir)
print (args.processstatus)
return args
def photzpmain():
args = parseCommandLine()
if args.cameratype is not None:
cameras = [camera for camera in args.cameratype.split(',')]
if args.site is not None:
sites = [site for site in args.site.split(',')]
else:
sites = ('lsc', 'cpt', 'ogg', 'coj', 'tfn', 'elp', 'sqa', 'bpl')
print("Crawling through camera types ", cameras, " at sites ", sites, " for date ", args.date)
for site in sites:
for cameratype in cameras:
crawlSite(site, cameratype, args)
elif args.camera is not None:
if args.site is None:
sites = '*'
else:
sites = args.site
print("Calibrating camera ", args.camera, " at site ", sites, ' for date ', args.date)
for date in args.date:
crawlSiteCameraArchive(sites, args.camera, args, date=date)
elif args.crawldirectory is not None:
imagedb = photdbinterface("%s/%s" % (args.crawldirectory, 'imagezp.db'))
crawlDirectory(args.crawldirectory, imagedb, args)
imagedb.close()
else:
print("Need to specify either a camera, or a camera type.")
sys.exit(0)
if __name__ == '__main__':
assert sys.version_info >= (3,5)
photzpmain()
| search = "%s/*-[es][19]1.fits.fz" % (directory)
inputlist = glob.glob(search)
initialsize = len (inputlist)
rejects = []
if not args.redo:
for image in inputlist:
if db.exists(image):
rejects.append (image)
for r in rejects:
inputlist.remove (r)
print ("Found %d files intially, but cleaned %d already measured images. Starting analysis of %d files" % (initialsize, len(rejects), len(inputlist)))
photzpStage = PhotCalib(args.ps1dir)
for image in inputlist:
image = image.rstrip()
photzpStage.analyzeImage(image, outputdb=db, outputimageRootDir=args.outputimageRootDir, mintexp=args.mintexp) | identifier_body |
photcalibration.py | #from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib
from longtermphotzp import photdbinterface
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import argparse
import re
import glob
import os
import math
import sys
import logging
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
import datetime
_logger = logging.getLogger(__name__)
__author__ = 'dharbeck'
class PhotCalib():
# To be replaced with map:
# LCO filter -> sdss filter name, sdsss g-i color term, airmass term, default zero point.
# possibly need this for all site:telescope:camera:filter settings. Maybe start with one
# default and see where it goes.
ps1catalog = None
def __init__(self, ps1dir):
# super(PhotCalib, self).__init__(pipeline_context)
# TODO: capture error if no ps1 catalog is found at location
self.ps1catalog = PS1IPP(ps1dir)
def do_stage(self, images):
""" future hook for BANZAI pipeline integration
"""
for i, image in enumerate(images):
pass
# logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
def generateCrossmatchedCatalog(self, image, mintexp=60):
""" Load the banzai-generated photometry catalog from 'CAT' extension, queries PS1 catalog for image FoV, and
returns a cross-matched catalog.
Errors conditions:
if photometricfilter is not supported, none is returned
if exposure time is < 60 seconds, None is returned.
:param image: input fits image path+name
:return:
"""
# Build up a baseline refernce catlog with meta data
retCatalog = {'fname': image,
'instmag': None
}
# Read banzai star catalog
testimage = fits.open(image)
# Boilerplate grab of status information
ra = testimage['SCI'].header['CRVAL1']
dec = testimage['SCI'].header['CRVAL2']
if not self.ps1catalog.isInCatalogFootprint(ra, dec):
_logger.debug("Image not in PS1 footprint. Ignoring")
testimage.close()
return None
retCatalog['exptime'] = testimage['SCI'].header['EXPTIME']
retCatalog['instfilter'] = testimage['SCI'].header['FILTER']
retCatalog['airmass'] = testimage['SCI'].header['AIRMASS']
retCatalog['dateobs'] = testimage['SCI'].header['DATE-OBS']
retCatalog['instrument'] = testimage['SCI'].header['INSTRUME']
retCatalog['siteid'] = testimage['SCI'].header['SITEID']
retCatalog['domid'] = testimage['SCI'].header['ENCID']
retCatalog['telescope'] = testimage['SCI'].header['TELID']
retCatalog['FOCOBOFF'] = testimage['SCI'].header['FOCOBOFF']
# Check if filter is supported
if retCatalog['instfilter'] not in self.ps1catalog.FILTERMAPPING:
_logger.debug("%s - Filter %s not viable for photometric calibration. Sorry" % (image, retCatalog['instfilter']))
testimage.close()
return None
# Check if exposure time is long enough
if (retCatalog['exptime'] < mintexp):
_logger.debug("%s - Exposure %s time is deemed too short, ignoring" % (image, retCatalog['exptime']))
testimage.close()
return None
# verify there is no deliberate defocus
if (retCatalog['FOCOBOFF'] is not None) and (retCatalog['FOCOBOFF'] != 0):
_logger.debug("%s - Exposure is deliberately defocussed by %s, ignoring" % (image, retCatalog['FOCOBOFF']))
testimage.close()
return None
# Get the instrumental filter and the matching reference catalog filter names.
referenceInformation = self.ps1catalog.FILTERMAPPING[retCatalog['instfilter']]
referenceFilterName = referenceInformation['refMag']
# Load photometry catalog from image, and transform into RA/Dec coordinates
try:
instCatalog = testimage['CAT'].data
except:
_logger.warning("%s - No extension \'CAT\' available, skipping." % (image))
testimage.close()
return None
# Transform the image catalog to RA / Dec based on the WCS solution in the header.
# TODO: rerun astrometry.net with a higher order distortion model
image_wcs = WCS(testimage['SCI'].header)
try:
ras, decs = image_wcs.all_pix2world(instCatalog['x'], instCatalog['y'], 1)
except:
_logger.error("%s: Failed to convert images coordinates to world coordinates. Giving up on file." % (image))
testimage.close()
return None
# Now we have all we wanted from the input image, close it
testimage.close()
# Query reference catalog TODO: paramterize FoV of query!
refcatalog = self.ps1catalog.get_reference_catalog(ra, dec, 0.25)
if refcatalog is None:
_logger.warning("%s, no reference catalog received." % image)
return None
# Start the catalog matching, using astropy skycoords built-in functions.
cInstrument = SkyCoord(ra=ras * u.degree, dec=decs * u.degree)
cReference = SkyCoord(ra=refcatalog['RA'] * u.degree, dec=refcatalog['DEC'] * u.degree)
idx, d2d, d3d = cReference.match_to_catalog_sky(cInstrument)
# Reshuffle the source catalog to index-match the reference catalog.
# There is probably as smarter way of doing this!
instCatalogT = np.transpose(instCatalog)[idx]
instCatalog = np.transpose(instCatalogT)
# Measure the distance between matched pairs. Important to down-select viable pairs.
distance = cReference.separation(cInstrument[idx]).arcsecond
# Define a reasonable condition on what is a good match on good photometry
condition = (distance < 5) & (instCatalog['FLUX'] > 0) & (refcatalog[referenceFilterName] > 0) & (
refcatalog[referenceFilterName] < 26)
# Calculate instrumental magnitude from PSF instrument photometry
instmag = -2.5 * np.log10(instCatalog['FLUX'][condition] / retCatalog['exptime'])
# Calculate the magnitude difference between reference and inst catalog
retCatalog['instmag'] = instmag
retCatalog['refcol'] = (refcatalog['g'] - refcatalog['i'])[condition]
retCatalog['refmag'] = refcatalog[referenceFilterName][condition]
retCatalog['ra'] = refcatalog['RA'][condition]
retCatalog['dec'] = refcatalog['DEC'][condition]
retCatalog['matchDistance'] = distance[condition]
# TODO: Read photometric error columns from reference and instrument catalogs, properly propagate error.
return retCatalog
def reject_outliers(self, data, m=2):
"""
Reject data from vector that are > m std deviations from median
:param m:
:return:
"""
std = np.std(data)
return data[abs(data - np.median(data)) < m * std]
def analyzeImage(self, imageName, outputdb= None,
outputimageRootDir=None, mintexp=60):
""" Do full photometric zeropoint analysis on an image
"""
# TODO: Make this procedure thread safe so it can be accelerated a bit.
retCatalog = self.generateCrossmatchedCatalog(imageName, mintexp=mintexp)
if (retCatalog is None) or (retCatalog['instmag'] is None) or (len(retCatalog['ra']) < 10):
if retCatalog is None:
return
if len(retCatalog['ra']) < 10:
_logger.info ("%s: Catalog returned, but is has less than 10 stars. Ignoring. " % (imageName))
return
# calculate the per star zeropoint
magZP = retCatalog['refmag'] - retCatalog['instmag']
refmag = retCatalog['refmag']
refcol = retCatalog['refcol']
# Calculate the photometric zeropoint.
# TODO: Robust median w/ rejection, error propagation.
cleandata = self.reject_outliers(magZP, 3)
photzp = np.median(cleandata)
photzpsig = np.std(cleandata)
# calculate color term
try:
cond = (refcol > 0) & (refcol < 3) & (np.abs(magZP - photzp) < 0.75)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
delta = np.abs(magZP - photzp - color_p(refcol))
cond = (delta < 0.2)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
colorterm = colorparams[0]
except:
_logger.warning("could not fit a color term. ")
color_p = None
colorterm = 0
# if requested, generate all sorts of diagnostic plots
if (outputimageRootDir is not None) and (os.path.exists(outputimageRootDir)):
outbasename = os.path.basename(imageName)
outbasename = re.sub('.fits.fz', '', outbasename)
### Zeropoint plot
plt.figure()
plt.plot(refmag, magZP, '.')
plt.xlim([10, 22])
plt.ylim([photzp-0.5, photzp+0.5])
plt.axhline(y=photzp, color='r', linestyle='-')
plt.xlabel("Reference catalog mag")
plt.ylabel("Reference Mag - Instrumnetal Mag (%s)" % (retCatalog['instfilter']))
plt.title("Photometric zeropoint %s %5.2f" % (outbasename, photzp))
plt.savefig("%s/%s_%s_zp.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
### Color term plot
plt.figure()
plt.plot(refcol, magZP - photzp, '.')
if color_p is not None:
xp = np.linspace(-0.5, 3.5, 10)
plt.plot(xp, color_p(xp), '-', label="color term fit % 6.4f" % (colorterm))
plt.legend()
plt.xlim ([-0.5, 3.0])
plt.ylim ([-1,1])
plt.xlabel("(g-r)$_{\\rm{SDSS}}$ Reference")
plt.ylabel("Reference Mag - Instrumnetal Mag - ZP (%5.2f) %s" % (photzp, retCatalog['instfilter']))
plt.title("Color correction %s " % (outbasename))
plt.savefig("%s/%s_%s_color.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
# TODO: Make this thread safe, e.g., write to transactional database, or return values for storing externally.
if outputdb is not None:
outputdb.addphotzp ( (imageName, retCatalog['dateobs'].replace('T', ' '), retCatalog['siteid'], retCatalog['domid'],
retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
retCatalog['airmass'], photzp, colorterm, photzpsig))
else:
_logger.info ("Not safing output for image %s " % imageName)
# with open(pickle, 'a') as f:
# output = "%s %s %s %s %s %s %s %s % 6.3f % 6.3f % 6.3f\n" % (
# imageName, retCatalog['dateobs'], retCatalog['siteid'], retCatalog['domid'],
# retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
# retCatalog['airmass'], photzp, colorterm, photzpsig)
# _logger.info(output)
# f.write(output)
# f.close()
return photzp
class PS1IPP:
""" Class to access local, distilled copy of PS1 data release.
Based on code from WIYN ODI quickreduce pipeline, developed by Ralf Kotula. See:
https://github.com/WIYN-ODI/QuickReduce
"""
FILTERMAPPING = {}
FILTERMAPPING['gp'] = {'refMag': 'g', 'colorTerm': 0.0, 'airmassTerm': 0.20, 'defaultZP': 0.0}
FILTERMAPPING['rp'] = {'refMag': 'r', 'colorTerm': 0.0, 'airmassTerm': 0.12, 'defaultZP': 0.0}
FILTERMAPPING['ip'] = {'refMag': 'i', 'colorTerm': 0.0, 'airmassTerm': 0.08, 'defaultZP': 0.0}
FILTERMAPPING['zp'] = {'refMag': 'z', 'colorTerm': 0.0, 'airmassTerm': 0.05, 'defaultZP': 0.0}
### PS to SDSS color transformations according to Finkbeiner 2016
### http://iopscience.iop.org/article/10.3847/0004-637X/822/2/66/meta#apj522061s2-4 Table 2
### Note that this transformation is valid for stars only. For the purpose of photometric
### calibration, it is desirable to select point sources only from the input catalog.
## Why reverse the order of the color term entries? Data are entered in the order as they are
## shown in paper. Reverse after the fact to avoid confusion when looking at paper
ps1colorterms = {}
ps1colorterms['g'] = [-0.01808, -0.13595, +0.01941, -0.00183][::-1]
ps1colorterms['r'] = [-0.01836, -0.03577, +0.02612, -0.00558][::-1]
ps1colorterms['i'] = [+0.01170, -0.00400, +0.00066, -0.00058][::-1]
ps1colorterms['z'] = [-0.01062, +0.07529, -0.03592, +0.00890][::-1]
def __init__(self, basedir):
self.basedir = basedir
self.skytable = None
def PS1toSDSS(self, table):
"""
Modify table in situ from PS1 to SDSS, requires column names compatible with ps1colorterms definition.
:param table:
:return: modified table.
"""
if table is not None:
pscolor = table['g'] - table['i']
for filter in self.ps1colorterms:
colorcorrection = np.polyval(self.ps1colorterms[filter], pscolor)
table[filter] -= colorcorrection
return table
def isInCatalogFootprint(self, ra, dec):
""" Verify if image is in catalog footprint.
TODO: Account for image field of view
"""
# PanSTARRS has valid entries for DEc > - 30 degrees
return dec >= -30.0
def get_reference_catalog(self, ra, dec, radius, overwrite_select=False):
""" Read i fits table from local catalog copy. Concatenate tables columns
from different fits tables for full coverage.
"""
# A lot of safeguarding boiler plate to ensure catalog files are valid.
if (self.basedir is None) or (not os.path.isdir(self.basedir)):
_logger.error("Unable to find reference catalog: %s" % (str(self.basedir)))
return None
# Load the SkyTable so we know in what files to look for the catalog"
_logger.debug("Using catalog found in %s" % (self.basedir))
skytable_filename = "%s/SkyTable.fits" % (self.basedir)
if (not os.path.isfile(skytable_filename)):
_logger.fatal("Unable to find catalog index file in %s!" % (self.basedir))
return None
# Read in the master index hdu
skytable_hdu = fits.open(skytable_filename)
skytable = skytable_hdu['SKY_REGION'].data
# Select entries that match our list
# print ra, dec, radius, type(ra), type(dec), type(radius)
# logger.debug("# Searching for stars within %.1f degress around %f , %f ..." % (radius, ra, dec))
if (not radius == None and radius > 0):
min_dec = dec - radius
max_dec = dec + radius
min_ra = ra - radius / math.cos(math.radians(dec))
max_ra = ra + radius / math.cos(math.radians(dec))
else:
min_dec, max_dec = dec[0], dec[1]
min_ra, max_ra = ra[0], ra[1]
_logger.debug("Querying catalog: Ra=%f...%f Dec=%f...%f" % (min_ra, max_ra, min_dec, max_dec))
if (max_ra > 360.):
# This wraps around the high end, shift all ra values by -180
# Now all search RAs are ok and around the 180, next also move the catalog values
selected = skytable['R_MIN'] < 180
skytable['R_MAX'][selected] += 360
skytable['R_MIN'][selected] += 360
if (min_ra < 0):
# Wrap around at the low end
selected = skytable['R_MAX'] > 180
skytable['R_MAX'][selected] -= 360
skytable['R_MIN'][selected] -= 360
_logger.debug("# Search radius: RA=%.1f ... %.1f DEC=%.1f ... %.1f" % (min_ra, max_ra, min_dec, max_dec))
try:
needed_catalogs = (skytable['PARENT'] > 0) & (skytable['PARENT'] < 25) & \
(skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
except KeyError:
# try without the PARENT field
needed_catalogs = (skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
# print skytable[needed_catalogs]
files_to_read = skytable['NAME'][needed_catalogs]
files_to_read = [f.strip() for f in files_to_read]
_logger.debug(files_to_read)
skytable_hdu.close() # Warning: might erase the loaded data, might need to copy array!
# Now quickly go over the list and take care of all filenames that still have a 0x00 in them
for i in range(len(files_to_read)):
found_at = files_to_read[i].find('\0')
if (found_at > 0):
files_to_read[i] = files_to_read[i][:found_at]
# Load all frames, one by one, and select all stars in the valid range.
# Then add them to the catalog with RAs and DECs
full_catalog = None # numpy.zeros(shape=(0,6))
catalog_filenames = []
# Start iterating though catalogs
for catalogname in files_to_read:
catalogfile = "%s/%s" % (self.basedir, catalogname)
# print catalogfile
if (not os.path.isfile(catalogfile)):
# not a file, try adding .fits to the end of the filename
if (os.path.isfile(catalogfile + ".fits")):
catalogfile += ".fits"
else:
# neither option (w/ or w/o .fits added is a file)
_logger.warning(
"Catalog file (%s) not found (base-dir: %s)" % (os.path.abspath(catalogfile), self.basedir))
continue
try:
hdu_cat = fits.open(catalogfile)
except:
_logger.warning("Unable to open catalog file %s" % (catalogfile))
continue
catalog_filenames.append(catalogfile)
_logger.debug("Adding %s to list of catalog files being used" % (catalogfile))
# read table into a nd-array buffer
cat_full = hdu_cat[1].data
hdu_cat.close()
# Read the RA and DEC values
cat_ra = cat_full['RA']
cat_dec = cat_full['DEC']
# To select the right region, shift a temporary catalog
cat_ra_shifted = cat_ra
if (max_ra > 360.):
cat_ra_shifted[cat_ra < 180] += 360
elif (min_ra < 0):
|
select_from_cat = (cat_ra_shifted > min_ra) & (cat_ra_shifted < max_ra) & (cat_dec > min_dec) & (
cat_dec < max_dec)
array_to_add = cat_full[select_from_cat]
_logger.debug("Read %d sources from %s" % (array_to_add.shape[0], catalogname))
if (full_catalog is None):
full_catalog = array_to_add
else:
full_catalog = np.append(full_catalog, array_to_add, axis=0)
# print photom_grizy[:3,:]
if (full_catalog is None):
_logger.warning("No stars found in area %s, %s from catalog %s" % (
str(ra), str(dec),
# ra[0], ra[1], dec[0], dec[1],
self.basedir))
else:
_logger.debug(
"Read a total of %d stars from %d catalogs!" % (full_catalog.shape[0], len(files_to_read)))
self.PS1toSDSS(full_catalog)
return full_catalog
#### Wrapper routines to use photometric zeropointing stand-alone
def crawlDirectory(directory, db, args):
search = "%s/*-[es][19]1.fits.fz" % (directory)
inputlist = glob.glob(search)
initialsize = len (inputlist)
rejects = []
if not args.redo:
for image in inputlist:
if db.exists(image):
rejects.append (image)
for r in rejects:
inputlist.remove (r)
print ("Found %d files intially, but cleaned %d already measured images. Starting analysis of %d files" % (initialsize, len(rejects), len(inputlist)))
photzpStage = PhotCalib(args.ps1dir)
for image in inputlist:
image = image.rstrip()
photzpStage.analyzeImage(image, outputdb=db, outputimageRootDir=args.outputimageRootDir, mintexp=args.mintexp)
def crawlSiteCameraArchive(site, camera, args, date=None):
'''
Process in the archive
:param site:
:param camera:
:param args:
:param date:
:return:
'''
if date is None:
date = '*'
if site is None:
_logger.error ("Must define a site !")
exit (1)
imagedb = photdbinterface(args.imagedbPrefix)
searchdir = "%s/%s/%s/%s/%s" % (args.rootdir, site, camera, date, args.processstatus)
# search = "%s/%s/%s/%s/preview/*-[es]11.fits.fz" % (args.rootdir, site, camera, date)
_logger.info("Searching in directories: %s" % (searchdir))
crawlDirectory(searchdir, imagedb, args)
imagedb.close();
def crawlSite(site, type, args):
""" Search for all cameras of a given type (fl, kb,fs) in a site directory and processed them """
searchdir = "%s/%s/%s*" % (args.rootdir, site, type)
cameralist = glob.glob(searchdir)
cameras = []
for candidate in cameralist:
cameras.append((site, os.path.basename(os.path.normpath(candidate))))
for setup in cameras:
for date in args.date:
print(setup[0], setup[1], date)
crawlSiteCameraArchive(setup[0], setup[1], args, date)
def parseCommandLine():
""" Read command line parameters
"""
parser = argparse.ArgumentParser(
description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')
parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],
help='Set the log level')
parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',
help='Directory of PS1 catalog')
parser.add_argument("--diagnosticplotsdir", dest='outputimageRootDir', default=None,
help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')
parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',
help='Result output directory. .db file is written here')
parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',
help="LCO archive root directory")
parser.add_argument('--site', dest='site', default=None, help='sites code for camera')
parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')
parser.add_argument('--redo', action='store_true')
parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')
mutex = parser.add_mutually_exclusive_group()
mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')
mutex.add_argument('--lastNdays', type=int)
cameragroup = parser.add_mutually_exclusive_group()
cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')
cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],
help='camera type to process at selected sites to process. ')
cameragroup.add_argument('--crawldirectory', default=None, type=str,
help="process all reduced image in specific directoy")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')
args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)
if args.outputimageRootDir is not None:
args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)
print ("Writing db to directory: %s" % args.outputimageRootDir)
if args.crawldirectory is not None:
args.crawldirectory = os.path.expanduser(args.crawldirectory)
if (args.lastNdays is not None):
args.date=[]
today = datetime.datetime.utcnow()
for ii in range (args.lastNdays):
day = today - datetime.timedelta(days=ii)
args.date.append (day.strftime("%Y%m%d"))
args.date = args.date[::-1]
args.ps1dir = os.path.expanduser(args.ps1dir)
print (args.processstatus)
return args
def photzpmain():
args = parseCommandLine()
if args.cameratype is not None:
cameras = [camera for camera in args.cameratype.split(',')]
if args.site is not None:
sites = [site for site in args.site.split(',')]
else:
sites = ('lsc', 'cpt', 'ogg', 'coj', 'tfn', 'elp', 'sqa', 'bpl')
print("Crawling through camera types ", cameras, " at sites ", sites, " for date ", args.date)
for site in sites:
for cameratype in cameras:
crawlSite(site, cameratype, args)
elif args.camera is not None:
if args.site is None:
sites = '*'
else:
sites = args.site
print("Calibrating camera ", args.camera, " at site ", sites, ' for date ', args.date)
for date in args.date:
crawlSiteCameraArchive(sites, args.camera, args, date=date)
elif args.crawldirectory is not None:
imagedb = photdbinterface("%s/%s" % (args.crawldirectory, 'imagezp.db'))
crawlDirectory(args.crawldirectory, imagedb, args)
imagedb.close()
else:
print("Need to specify either a camera, or a camera type.")
sys.exit(0)
if __name__ == '__main__':
assert sys.version_info >= (3,5)
photzpmain()
| cat_ra_shifted[cat_ra > 180] -= 360 | conditional_block |
photcalibration.py | #from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib
from longtermphotzp import photdbinterface
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import argparse
import re
import glob
import os
import math
import sys
import logging
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
import datetime
_logger = logging.getLogger(__name__)
__author__ = 'dharbeck'
class PhotCalib():
# To be replaced with map:
# LCO filter -> sdss filter name, sdsss g-i color term, airmass term, default zero point.
# possibly need this for all site:telescope:camera:filter settings. Maybe start with one
# default and see where it goes.
ps1catalog = None
def __init__(self, ps1dir):
# super(PhotCalib, self).__init__(pipeline_context)
# TODO: capture error if no ps1 catalog is found at location
self.ps1catalog = PS1IPP(ps1dir)
def do_stage(self, images):
""" future hook for BANZAI pipeline integration
"""
for i, image in enumerate(images):
pass
# logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
def generateCrossmatchedCatalog(self, image, mintexp=60):
""" Load the banzai-generated photometry catalog from 'CAT' extension, queries PS1 catalog for image FoV, and
returns a cross-matched catalog.
Errors conditions:
if photometricfilter is not supported, none is returned
if exposure time is < 60 seconds, None is returned.
:param image: input fits image path+name
:return:
"""
# Build up a baseline refernce catlog with meta data
retCatalog = {'fname': image,
'instmag': None
}
# Read banzai star catalog
testimage = fits.open(image)
# Boilerplate grab of status information
ra = testimage['SCI'].header['CRVAL1']
dec = testimage['SCI'].header['CRVAL2']
if not self.ps1catalog.isInCatalogFootprint(ra, dec):
_logger.debug("Image not in PS1 footprint. Ignoring")
testimage.close()
return None
retCatalog['exptime'] = testimage['SCI'].header['EXPTIME']
retCatalog['instfilter'] = testimage['SCI'].header['FILTER']
retCatalog['airmass'] = testimage['SCI'].header['AIRMASS']
retCatalog['dateobs'] = testimage['SCI'].header['DATE-OBS']
retCatalog['instrument'] = testimage['SCI'].header['INSTRUME']
retCatalog['siteid'] = testimage['SCI'].header['SITEID']
retCatalog['domid'] = testimage['SCI'].header['ENCID']
retCatalog['telescope'] = testimage['SCI'].header['TELID']
retCatalog['FOCOBOFF'] = testimage['SCI'].header['FOCOBOFF']
# Check if filter is supported
if retCatalog['instfilter'] not in self.ps1catalog.FILTERMAPPING:
_logger.debug("%s - Filter %s not viable for photometric calibration. Sorry" % (image, retCatalog['instfilter']))
testimage.close()
return None
# Check if exposure time is long enough
if (retCatalog['exptime'] < mintexp):
_logger.debug("%s - Exposure %s time is deemed too short, ignoring" % (image, retCatalog['exptime']))
testimage.close()
return None
# verify there is no deliberate defocus
if (retCatalog['FOCOBOFF'] is not None) and (retCatalog['FOCOBOFF'] != 0):
_logger.debug("%s - Exposure is deliberately defocussed by %s, ignoring" % (image, retCatalog['FOCOBOFF']))
testimage.close()
return None
# Get the instrumental filter and the matching reference catalog filter names.
referenceInformation = self.ps1catalog.FILTERMAPPING[retCatalog['instfilter']]
referenceFilterName = referenceInformation['refMag']
# Load photometry catalog from image, and transform into RA/Dec coordinates
try:
instCatalog = testimage['CAT'].data
except:
_logger.warning("%s - No extension \'CAT\' available, skipping." % (image))
testimage.close()
return None
# Transform the image catalog to RA / Dec based on the WCS solution in the header.
# TODO: rerun astrometry.net with a higher order distortion model
image_wcs = WCS(testimage['SCI'].header)
try:
ras, decs = image_wcs.all_pix2world(instCatalog['x'], instCatalog['y'], 1)
except:
_logger.error("%s: Failed to convert images coordinates to world coordinates. Giving up on file." % (image))
testimage.close()
return None
# Now we have all we wanted from the input image, close it
testimage.close()
# Query reference catalog TODO: paramterize FoV of query!
refcatalog = self.ps1catalog.get_reference_catalog(ra, dec, 0.25)
if refcatalog is None:
_logger.warning("%s, no reference catalog received." % image)
return None
# Start the catalog matching, using astropy skycoords built-in functions.
cInstrument = SkyCoord(ra=ras * u.degree, dec=decs * u.degree)
cReference = SkyCoord(ra=refcatalog['RA'] * u.degree, dec=refcatalog['DEC'] * u.degree)
idx, d2d, d3d = cReference.match_to_catalog_sky(cInstrument)
# Reshuffle the source catalog to index-match the reference catalog.
# There is probably as smarter way of doing this!
instCatalogT = np.transpose(instCatalog)[idx]
instCatalog = np.transpose(instCatalogT)
# Measure the distance between matched pairs. Important to down-select viable pairs.
distance = cReference.separation(cInstrument[idx]).arcsecond
# Define a reasonable condition on what is a good match on good photometry
condition = (distance < 5) & (instCatalog['FLUX'] > 0) & (refcatalog[referenceFilterName] > 0) & (
refcatalog[referenceFilterName] < 26)
# Calculate instrumental magnitude from PSF instrument photometry
instmag = -2.5 * np.log10(instCatalog['FLUX'][condition] / retCatalog['exptime'])
# Calculate the magnitude difference between reference and inst catalog
retCatalog['instmag'] = instmag
retCatalog['refcol'] = (refcatalog['g'] - refcatalog['i'])[condition]
retCatalog['refmag'] = refcatalog[referenceFilterName][condition]
retCatalog['ra'] = refcatalog['RA'][condition]
retCatalog['dec'] = refcatalog['DEC'][condition]
retCatalog['matchDistance'] = distance[condition]
# TODO: Read photometric error columns from reference and instrument catalogs, properly propagate error.
return retCatalog
def reject_outliers(self, data, m=2):
"""
Reject data from vector that are > m std deviations from median
:param m:
:return:
"""
std = np.std(data)
return data[abs(data - np.median(data)) < m * std]
def analyzeImage(self, imageName, outputdb= None,
outputimageRootDir=None, mintexp=60):
""" Do full photometric zeropoint analysis on an image
"""
# TODO: Make this procedure thread safe so it can be accelerated a bit.
retCatalog = self.generateCrossmatchedCatalog(imageName, mintexp=mintexp)
if (retCatalog is None) or (retCatalog['instmag'] is None) or (len(retCatalog['ra']) < 10):
if retCatalog is None:
return
if len(retCatalog['ra']) < 10:
_logger.info ("%s: Catalog returned, but is has less than 10 stars. Ignoring. " % (imageName))
return
# calculate the per star zeropoint
magZP = retCatalog['refmag'] - retCatalog['instmag']
refmag = retCatalog['refmag']
refcol = retCatalog['refcol']
# Calculate the photometric zeropoint.
# TODO: Robust median w/ rejection, error propagation.
cleandata = self.reject_outliers(magZP, 3)
photzp = np.median(cleandata)
photzpsig = np.std(cleandata)
# calculate color term
try:
cond = (refcol > 0) & (refcol < 3) & (np.abs(magZP - photzp) < 0.75)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
delta = np.abs(magZP - photzp - color_p(refcol))
cond = (delta < 0.2)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
colorterm = colorparams[0]
except:
_logger.warning("could not fit a color term. ")
color_p = None
colorterm = 0
# if requested, generate all sorts of diagnostic plots
if (outputimageRootDir is not None) and (os.path.exists(outputimageRootDir)):
outbasename = os.path.basename(imageName)
outbasename = re.sub('.fits.fz', '', outbasename)
### Zeropoint plot
plt.figure()
plt.plot(refmag, magZP, '.')
plt.xlim([10, 22])
plt.ylim([photzp-0.5, photzp+0.5])
plt.axhline(y=photzp, color='r', linestyle='-')
plt.xlabel("Reference catalog mag")
plt.ylabel("Reference Mag - Instrumnetal Mag (%s)" % (retCatalog['instfilter']))
plt.title("Photometric zeropoint %s %5.2f" % (outbasename, photzp))
plt.savefig("%s/%s_%s_zp.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
### Color term plot
plt.figure()
plt.plot(refcol, magZP - photzp, '.')
if color_p is not None:
xp = np.linspace(-0.5, 3.5, 10)
plt.plot(xp, color_p(xp), '-', label="color term fit % 6.4f" % (colorterm))
plt.legend()
plt.xlim ([-0.5, 3.0])
plt.ylim ([-1,1])
plt.xlabel("(g-r)$_{\\rm{SDSS}}$ Reference")
plt.ylabel("Reference Mag - Instrumnetal Mag - ZP (%5.2f) %s" % (photzp, retCatalog['instfilter']))
plt.title("Color correction %s " % (outbasename))
plt.savefig("%s/%s_%s_color.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
# TODO: Make this thread safe, e.g., write to transactional database, or return values for storing externally.
if outputdb is not None:
outputdb.addphotzp ( (imageName, retCatalog['dateobs'].replace('T', ' '), retCatalog['siteid'], retCatalog['domid'],
retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
retCatalog['airmass'], photzp, colorterm, photzpsig))
else:
_logger.info ("Not safing output for image %s " % imageName)
# with open(pickle, 'a') as f:
# output = "%s %s %s %s %s %s %s %s % 6.3f % 6.3f % 6.3f\n" % (
# imageName, retCatalog['dateobs'], retCatalog['siteid'], retCatalog['domid'],
# retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
# retCatalog['airmass'], photzp, colorterm, photzpsig)
# _logger.info(output)
# f.write(output)
# f.close()
return photzp
class PS1IPP:
""" Class to access local, distilled copy of PS1 data release.
Based on code from WIYN ODI quickreduce pipeline, developed by Ralf Kotula. See:
https://github.com/WIYN-ODI/QuickReduce
"""
FILTERMAPPING = {}
FILTERMAPPING['gp'] = {'refMag': 'g', 'colorTerm': 0.0, 'airmassTerm': 0.20, 'defaultZP': 0.0}
FILTERMAPPING['rp'] = {'refMag': 'r', 'colorTerm': 0.0, 'airmassTerm': 0.12, 'defaultZP': 0.0}
FILTERMAPPING['ip'] = {'refMag': 'i', 'colorTerm': 0.0, 'airmassTerm': 0.08, 'defaultZP': 0.0}
FILTERMAPPING['zp'] = {'refMag': 'z', 'colorTerm': 0.0, 'airmassTerm': 0.05, 'defaultZP': 0.0}
### PS to SDSS color transformations according to Finkbeiner 2016
### http://iopscience.iop.org/article/10.3847/0004-637X/822/2/66/meta#apj522061s2-4 Table 2
### Note that this transformation is valid for stars only. For the purpose of photometric
### calibration, it is desirable to select point sources only from the input catalog.
## Why reverse the order of the color term entries? Data are entered in the order as they are
## shown in paper. Reverse after the fact to avoid confusion when looking at paper
ps1colorterms = {}
ps1colorterms['g'] = [-0.01808, -0.13595, +0.01941, -0.00183][::-1]
ps1colorterms['r'] = [-0.01836, -0.03577, +0.02612, -0.00558][::-1]
ps1colorterms['i'] = [+0.01170, -0.00400, +0.00066, -0.00058][::-1]
ps1colorterms['z'] = [-0.01062, +0.07529, -0.03592, +0.00890][::-1]
def __init__(self, basedir):
self.basedir = basedir
self.skytable = None
def PS1toSDSS(self, table):
"""
Modify table in situ from PS1 to SDSS, requires column names compatible with ps1colorterms definition.
:param table:
:return: modified table.
"""
if table is not None:
pscolor = table['g'] - table['i']
for filter in self.ps1colorterms:
colorcorrection = np.polyval(self.ps1colorterms[filter], pscolor)
table[filter] -= colorcorrection
return table
def isInCatalogFootprint(self, ra, dec):
""" Verify if image is in catalog footprint.
TODO: Account for image field of view
"""
# PanSTARRS has valid entries for DEc > - 30 degrees
return dec >= -30.0
def get_reference_catalog(self, ra, dec, radius, overwrite_select=False):
""" Read i fits table from local catalog copy. Concatenate tables columns
from different fits tables for full coverage.
"""
# A lot of safeguarding boiler plate to ensure catalog files are valid.
if (self.basedir is None) or (not os.path.isdir(self.basedir)):
_logger.error("Unable to find reference catalog: %s" % (str(self.basedir)))
return None
# Load the SkyTable so we know in what files to look for the catalog"
_logger.debug("Using catalog found in %s" % (self.basedir))
skytable_filename = "%s/SkyTable.fits" % (self.basedir)
if (not os.path.isfile(skytable_filename)):
_logger.fatal("Unable to find catalog index file in %s!" % (self.basedir))
return None
# Read in the master index hdu
skytable_hdu = fits.open(skytable_filename)
skytable = skytable_hdu['SKY_REGION'].data
# Select entries that match our list
# print ra, dec, radius, type(ra), type(dec), type(radius)
# logger.debug("# Searching for stars within %.1f degress around %f , %f ..." % (radius, ra, dec))
if (not radius == None and radius > 0):
min_dec = dec - radius
max_dec = dec + radius
min_ra = ra - radius / math.cos(math.radians(dec))
max_ra = ra + radius / math.cos(math.radians(dec))
else:
min_dec, max_dec = dec[0], dec[1]
min_ra, max_ra = ra[0], ra[1]
_logger.debug("Querying catalog: Ra=%f...%f Dec=%f...%f" % (min_ra, max_ra, min_dec, max_dec))
if (max_ra > 360.):
# This wraps around the high end, shift all ra values by -180
# Now all search RAs are ok and around the 180, next also move the catalog values
selected = skytable['R_MIN'] < 180
skytable['R_MAX'][selected] += 360
skytable['R_MIN'][selected] += 360
if (min_ra < 0):
# Wrap around at the low end
selected = skytable['R_MAX'] > 180
skytable['R_MAX'][selected] -= 360
skytable['R_MIN'][selected] -= 360
_logger.debug("# Search radius: RA=%.1f ... %.1f DEC=%.1f ... %.1f" % (min_ra, max_ra, min_dec, max_dec))
try:
needed_catalogs = (skytable['PARENT'] > 0) & (skytable['PARENT'] < 25) & \
(skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
except KeyError:
# try without the PARENT field
needed_catalogs = (skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
# print skytable[needed_catalogs]
files_to_read = skytable['NAME'][needed_catalogs]
files_to_read = [f.strip() for f in files_to_read]
_logger.debug(files_to_read)
skytable_hdu.close() # Warning: might erase the loaded data, might need to copy array!
# Now quickly go over the list and take care of all filenames that still have a 0x00 in them
for i in range(len(files_to_read)):
found_at = files_to_read[i].find('\0')
if (found_at > 0):
files_to_read[i] = files_to_read[i][:found_at]
# Load all frames, one by one, and select all stars in the valid range.
# Then add them to the catalog with RAs and DECs
full_catalog = None # numpy.zeros(shape=(0,6))
catalog_filenames = []
# Start iterating though catalogs
for catalogname in files_to_read:
catalogfile = "%s/%s" % (self.basedir, catalogname)
# print catalogfile
if (not os.path.isfile(catalogfile)):
# not a file, try adding .fits to the end of the filename
if (os.path.isfile(catalogfile + ".fits")):
catalogfile += ".fits"
else:
# neither option (w/ or w/o .fits added is a file)
_logger.warning(
"Catalog file (%s) not found (base-dir: %s)" % (os.path.abspath(catalogfile), self.basedir))
continue
try:
hdu_cat = fits.open(catalogfile)
except:
_logger.warning("Unable to open catalog file %s" % (catalogfile))
continue
catalog_filenames.append(catalogfile)
_logger.debug("Adding %s to list of catalog files being used" % (catalogfile))
# read table into a nd-array buffer
cat_full = hdu_cat[1].data
hdu_cat.close()
# Read the RA and DEC values
cat_ra = cat_full['RA']
cat_dec = cat_full['DEC']
# To select the right region, shift a temporary catalog
cat_ra_shifted = cat_ra
if (max_ra > 360.):
cat_ra_shifted[cat_ra < 180] += 360
elif (min_ra < 0):
cat_ra_shifted[cat_ra > 180] -= 360
select_from_cat = (cat_ra_shifted > min_ra) & (cat_ra_shifted < max_ra) & (cat_dec > min_dec) & (
cat_dec < max_dec)
array_to_add = cat_full[select_from_cat]
_logger.debug("Read %d sources from %s" % (array_to_add.shape[0], catalogname))
if (full_catalog is None):
full_catalog = array_to_add
else:
full_catalog = np.append(full_catalog, array_to_add, axis=0)
# print photom_grizy[:3,:]
if (full_catalog is None):
_logger.warning("No stars found in area %s, %s from catalog %s" % (
str(ra), str(dec),
# ra[0], ra[1], dec[0], dec[1],
self.basedir))
else:
_logger.debug(
"Read a total of %d stars from %d catalogs!" % (full_catalog.shape[0], len(files_to_read)))
self.PS1toSDSS(full_catalog)
return full_catalog
#### Wrapper routines to use photometric zeropointing stand-alone
def crawlDirectory(directory, db, args):
search = "%s/*-[es][19]1.fits.fz" % (directory)
inputlist = glob.glob(search)
initialsize = len (inputlist)
rejects = []
if not args.redo:
for image in inputlist:
if db.exists(image):
rejects.append (image)
for r in rejects:
inputlist.remove (r)
print ("Found %d files intially, but cleaned %d already measured images. Starting analysis of %d files" % (initialsize, len(rejects), len(inputlist)))
photzpStage = PhotCalib(args.ps1dir)
for image in inputlist:
image = image.rstrip()
photzpStage.analyzeImage(image, outputdb=db, outputimageRootDir=args.outputimageRootDir, mintexp=args.mintexp)
def crawlSiteCameraArchive(site, camera, args, date=None):
'''
Process in the archive | :param args:
:param date:
:return:
'''
if date is None:
date = '*'
if site is None:
_logger.error ("Must define a site !")
exit (1)
imagedb = photdbinterface(args.imagedbPrefix)
searchdir = "%s/%s/%s/%s/%s" % (args.rootdir, site, camera, date, args.processstatus)
# search = "%s/%s/%s/%s/preview/*-[es]11.fits.fz" % (args.rootdir, site, camera, date)
_logger.info("Searching in directories: %s" % (searchdir))
crawlDirectory(searchdir, imagedb, args)
imagedb.close();
def crawlSite(site, type, args):
""" Search for all cameras of a given type (fl, kb,fs) in a site directory and processed them """
searchdir = "%s/%s/%s*" % (args.rootdir, site, type)
cameralist = glob.glob(searchdir)
cameras = []
for candidate in cameralist:
cameras.append((site, os.path.basename(os.path.normpath(candidate))))
for setup in cameras:
for date in args.date:
print(setup[0], setup[1], date)
crawlSiteCameraArchive(setup[0], setup[1], args, date)
def parseCommandLine():
""" Read command line parameters
"""
parser = argparse.ArgumentParser(
description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')
parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],
help='Set the log level')
parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',
help='Directory of PS1 catalog')
parser.add_argument("--diagnosticplotsdir", dest='outputimageRootDir', default=None,
help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')
parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',
help='Result output directory. .db file is written here')
parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',
help="LCO archive root directory")
parser.add_argument('--site', dest='site', default=None, help='sites code for camera')
parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')
parser.add_argument('--redo', action='store_true')
parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')
mutex = parser.add_mutually_exclusive_group()
mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')
mutex.add_argument('--lastNdays', type=int)
cameragroup = parser.add_mutually_exclusive_group()
cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')
cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],
help='camera type to process at selected sites to process. ')
cameragroup.add_argument('--crawldirectory', default=None, type=str,
help="process all reduced image in specific directoy")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')
args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)
if args.outputimageRootDir is not None:
args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)
print ("Writing db to directory: %s" % args.outputimageRootDir)
if args.crawldirectory is not None:
args.crawldirectory = os.path.expanduser(args.crawldirectory)
if (args.lastNdays is not None):
args.date=[]
today = datetime.datetime.utcnow()
for ii in range (args.lastNdays):
day = today - datetime.timedelta(days=ii)
args.date.append (day.strftime("%Y%m%d"))
args.date = args.date[::-1]
args.ps1dir = os.path.expanduser(args.ps1dir)
print (args.processstatus)
return args
def photzpmain():
args = parseCommandLine()
if args.cameratype is not None:
cameras = [camera for camera in args.cameratype.split(',')]
if args.site is not None:
sites = [site for site in args.site.split(',')]
else:
sites = ('lsc', 'cpt', 'ogg', 'coj', 'tfn', 'elp', 'sqa', 'bpl')
print("Crawling through camera types ", cameras, " at sites ", sites, " for date ", args.date)
for site in sites:
for cameratype in cameras:
crawlSite(site, cameratype, args)
elif args.camera is not None:
if args.site is None:
sites = '*'
else:
sites = args.site
print("Calibrating camera ", args.camera, " at site ", sites, ' for date ', args.date)
for date in args.date:
crawlSiteCameraArchive(sites, args.camera, args, date=date)
elif args.crawldirectory is not None:
imagedb = photdbinterface("%s/%s" % (args.crawldirectory, 'imagezp.db'))
crawlDirectory(args.crawldirectory, imagedb, args)
imagedb.close()
else:
print("Need to specify either a camera, or a camera type.")
sys.exit(0)
if __name__ == '__main__':
assert sys.version_info >= (3,5)
photzpmain() |
:param site:
:param camera: | random_line_split |
photcalibration.py | #from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib
from longtermphotzp import photdbinterface
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import numpy as np
import argparse
import re
import glob
import os
import math
import sys
import logging
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy import units as u
import datetime
_logger = logging.getLogger(__name__)
__author__ = 'dharbeck'
class PhotCalib():
# To be replaced with map:
# LCO filter -> sdss filter name, sdsss g-i color term, airmass term, default zero point.
# possibly need this for all site:telescope:camera:filter settings. Maybe start with one
# default and see where it goes.
ps1catalog = None
def __init__(self, ps1dir):
# super(PhotCalib, self).__init__(pipeline_context)
# TODO: capture error if no ps1 catalog is found at location
self.ps1catalog = PS1IPP(ps1dir)
def do_stage(self, images):
""" future hook for BANZAI pipeline integration
"""
for i, image in enumerate(images):
pass
# logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)
def generateCrossmatchedCatalog(self, image, mintexp=60):
""" Load the banzai-generated photometry catalog from 'CAT' extension, queries PS1 catalog for image FoV, and
returns a cross-matched catalog.
Errors conditions:
if photometricfilter is not supported, none is returned
if exposure time is < 60 seconds, None is returned.
:param image: input fits image path+name
:return:
"""
# Build up a baseline refernce catlog with meta data
retCatalog = {'fname': image,
'instmag': None
}
# Read banzai star catalog
testimage = fits.open(image)
# Boilerplate grab of status information
ra = testimage['SCI'].header['CRVAL1']
dec = testimage['SCI'].header['CRVAL2']
if not self.ps1catalog.isInCatalogFootprint(ra, dec):
_logger.debug("Image not in PS1 footprint. Ignoring")
testimage.close()
return None
retCatalog['exptime'] = testimage['SCI'].header['EXPTIME']
retCatalog['instfilter'] = testimage['SCI'].header['FILTER']
retCatalog['airmass'] = testimage['SCI'].header['AIRMASS']
retCatalog['dateobs'] = testimage['SCI'].header['DATE-OBS']
retCatalog['instrument'] = testimage['SCI'].header['INSTRUME']
retCatalog['siteid'] = testimage['SCI'].header['SITEID']
retCatalog['domid'] = testimage['SCI'].header['ENCID']
retCatalog['telescope'] = testimage['SCI'].header['TELID']
retCatalog['FOCOBOFF'] = testimage['SCI'].header['FOCOBOFF']
# Check if filter is supported
if retCatalog['instfilter'] not in self.ps1catalog.FILTERMAPPING:
_logger.debug("%s - Filter %s not viable for photometric calibration. Sorry" % (image, retCatalog['instfilter']))
testimage.close()
return None
# Check if exposure time is long enough
if (retCatalog['exptime'] < mintexp):
_logger.debug("%s - Exposure %s time is deemed too short, ignoring" % (image, retCatalog['exptime']))
testimage.close()
return None
# verify there is no deliberate defocus
if (retCatalog['FOCOBOFF'] is not None) and (retCatalog['FOCOBOFF'] != 0):
_logger.debug("%s - Exposure is deliberately defocussed by %s, ignoring" % (image, retCatalog['FOCOBOFF']))
testimage.close()
return None
# Get the instrumental filter and the matching reference catalog filter names.
referenceInformation = self.ps1catalog.FILTERMAPPING[retCatalog['instfilter']]
referenceFilterName = referenceInformation['refMag']
# Load photometry catalog from image, and transform into RA/Dec coordinates
try:
instCatalog = testimage['CAT'].data
except:
_logger.warning("%s - No extension \'CAT\' available, skipping." % (image))
testimage.close()
return None
# Transform the image catalog to RA / Dec based on the WCS solution in the header.
# TODO: rerun astrometry.net with a higher order distortion model
image_wcs = WCS(testimage['SCI'].header)
try:
ras, decs = image_wcs.all_pix2world(instCatalog['x'], instCatalog['y'], 1)
except:
_logger.error("%s: Failed to convert images coordinates to world coordinates. Giving up on file." % (image))
testimage.close()
return None
# Now we have all we wanted from the input image, close it
testimage.close()
# Query reference catalog TODO: paramterize FoV of query!
refcatalog = self.ps1catalog.get_reference_catalog(ra, dec, 0.25)
if refcatalog is None:
_logger.warning("%s, no reference catalog received." % image)
return None
# Start the catalog matching, using astropy skycoords built-in functions.
cInstrument = SkyCoord(ra=ras * u.degree, dec=decs * u.degree)
cReference = SkyCoord(ra=refcatalog['RA'] * u.degree, dec=refcatalog['DEC'] * u.degree)
idx, d2d, d3d = cReference.match_to_catalog_sky(cInstrument)
# Reshuffle the source catalog to index-match the reference catalog.
# There is probably as smarter way of doing this!
instCatalogT = np.transpose(instCatalog)[idx]
instCatalog = np.transpose(instCatalogT)
# Measure the distance between matched pairs. Important to down-select viable pairs.
distance = cReference.separation(cInstrument[idx]).arcsecond
# Define a reasonable condition on what is a good match on good photometry
condition = (distance < 5) & (instCatalog['FLUX'] > 0) & (refcatalog[referenceFilterName] > 0) & (
refcatalog[referenceFilterName] < 26)
# Calculate instrumental magnitude from PSF instrument photometry
instmag = -2.5 * np.log10(instCatalog['FLUX'][condition] / retCatalog['exptime'])
# Calculate the magnitude difference between reference and inst catalog
retCatalog['instmag'] = instmag
retCatalog['refcol'] = (refcatalog['g'] - refcatalog['i'])[condition]
retCatalog['refmag'] = refcatalog[referenceFilterName][condition]
retCatalog['ra'] = refcatalog['RA'][condition]
retCatalog['dec'] = refcatalog['DEC'][condition]
retCatalog['matchDistance'] = distance[condition]
# TODO: Read photometric error columns from reference and instrument catalogs, properly propagate error.
return retCatalog
def reject_outliers(self, data, m=2):
"""
Reject data from vector that are > m std deviations from median
:param m:
:return:
"""
std = np.std(data)
return data[abs(data - np.median(data)) < m * std]
def analyzeImage(self, imageName, outputdb= None,
outputimageRootDir=None, mintexp=60):
""" Do full photometric zeropoint analysis on an image
"""
# TODO: Make this procedure thread safe so it can be accelerated a bit.
retCatalog = self.generateCrossmatchedCatalog(imageName, mintexp=mintexp)
if (retCatalog is None) or (retCatalog['instmag'] is None) or (len(retCatalog['ra']) < 10):
if retCatalog is None:
return
if len(retCatalog['ra']) < 10:
_logger.info ("%s: Catalog returned, but is has less than 10 stars. Ignoring. " % (imageName))
return
# calculate the per star zeropoint
magZP = retCatalog['refmag'] - retCatalog['instmag']
refmag = retCatalog['refmag']
refcol = retCatalog['refcol']
# Calculate the photometric zeropoint.
# TODO: Robust median w/ rejection, error propagation.
cleandata = self.reject_outliers(magZP, 3)
photzp = np.median(cleandata)
photzpsig = np.std(cleandata)
# calculate color term
try:
cond = (refcol > 0) & (refcol < 3) & (np.abs(magZP - photzp) < 0.75)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
delta = np.abs(magZP - photzp - color_p(refcol))
cond = (delta < 0.2)
colorparams = np.polyfit(refcol[cond], (magZP - photzp)[cond], 1)
color_p = np.poly1d(colorparams)
colorterm = colorparams[0]
except:
_logger.warning("could not fit a color term. ")
color_p = None
colorterm = 0
# if requested, generate all sorts of diagnostic plots
if (outputimageRootDir is not None) and (os.path.exists(outputimageRootDir)):
outbasename = os.path.basename(imageName)
outbasename = re.sub('.fits.fz', '', outbasename)
### Zeropoint plot
plt.figure()
plt.plot(refmag, magZP, '.')
plt.xlim([10, 22])
plt.ylim([photzp-0.5, photzp+0.5])
plt.axhline(y=photzp, color='r', linestyle='-')
plt.xlabel("Reference catalog mag")
plt.ylabel("Reference Mag - Instrumnetal Mag (%s)" % (retCatalog['instfilter']))
plt.title("Photometric zeropoint %s %5.2f" % (outbasename, photzp))
plt.savefig("%s/%s_%s_zp.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
### Color term plot
plt.figure()
plt.plot(refcol, magZP - photzp, '.')
if color_p is not None:
xp = np.linspace(-0.5, 3.5, 10)
plt.plot(xp, color_p(xp), '-', label="color term fit % 6.4f" % (colorterm))
plt.legend()
plt.xlim ([-0.5, 3.0])
plt.ylim ([-1,1])
plt.xlabel("(g-r)$_{\\rm{SDSS}}$ Reference")
plt.ylabel("Reference Mag - Instrumnetal Mag - ZP (%5.2f) %s" % (photzp, retCatalog['instfilter']))
plt.title("Color correction %s " % (outbasename))
plt.savefig("%s/%s_%s_color.png" % (outputimageRootDir, outbasename, retCatalog['instfilter']))
plt.close()
# TODO: Make this thread safe, e.g., write to transactional database, or return values for storing externally.
if outputdb is not None:
outputdb.addphotzp ( (imageName, retCatalog['dateobs'].replace('T', ' '), retCatalog['siteid'], retCatalog['domid'],
retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
retCatalog['airmass'], photzp, colorterm, photzpsig))
else:
_logger.info ("Not safing output for image %s " % imageName)
# with open(pickle, 'a') as f:
# output = "%s %s %s %s %s %s %s %s % 6.3f % 6.3f % 6.3f\n" % (
# imageName, retCatalog['dateobs'], retCatalog['siteid'], retCatalog['domid'],
# retCatalog['telescope'], retCatalog['instrument'], retCatalog['instfilter'],
# retCatalog['airmass'], photzp, colorterm, photzpsig)
# _logger.info(output)
# f.write(output)
# f.close()
return photzp
class PS1IPP:
""" Class to access local, distilled copy of PS1 data release.
Based on code from WIYN ODI quickreduce pipeline, developed by Ralf Kotula. See:
https://github.com/WIYN-ODI/QuickReduce
"""
FILTERMAPPING = {}
FILTERMAPPING['gp'] = {'refMag': 'g', 'colorTerm': 0.0, 'airmassTerm': 0.20, 'defaultZP': 0.0}
FILTERMAPPING['rp'] = {'refMag': 'r', 'colorTerm': 0.0, 'airmassTerm': 0.12, 'defaultZP': 0.0}
FILTERMAPPING['ip'] = {'refMag': 'i', 'colorTerm': 0.0, 'airmassTerm': 0.08, 'defaultZP': 0.0}
FILTERMAPPING['zp'] = {'refMag': 'z', 'colorTerm': 0.0, 'airmassTerm': 0.05, 'defaultZP': 0.0}
### PS to SDSS color transformations according to Finkbeiner 2016
### http://iopscience.iop.org/article/10.3847/0004-637X/822/2/66/meta#apj522061s2-4 Table 2
### Note that this transformation is valid for stars only. For the purpose of photometric
### calibration, it is desirable to select point sources only from the input catalog.
## Why reverse the order of the color term entries? Data are entered in the order as they are
## shown in paper. Reverse after the fact to avoid confusion when looking at paper
ps1colorterms = {}
ps1colorterms['g'] = [-0.01808, -0.13595, +0.01941, -0.00183][::-1]
ps1colorterms['r'] = [-0.01836, -0.03577, +0.02612, -0.00558][::-1]
ps1colorterms['i'] = [+0.01170, -0.00400, +0.00066, -0.00058][::-1]
ps1colorterms['z'] = [-0.01062, +0.07529, -0.03592, +0.00890][::-1]
def __init__(self, basedir):
self.basedir = basedir
self.skytable = None
def PS1toSDSS(self, table):
"""
Modify table in situ from PS1 to SDSS, requires column names compatible with ps1colorterms definition.
:param table:
:return: modified table.
"""
if table is not None:
pscolor = table['g'] - table['i']
for filter in self.ps1colorterms:
colorcorrection = np.polyval(self.ps1colorterms[filter], pscolor)
table[filter] -= colorcorrection
return table
def isInCatalogFootprint(self, ra, dec):
""" Verify if image is in catalog footprint.
TODO: Account for image field of view
"""
# PanSTARRS has valid entries for DEc > - 30 degrees
return dec >= -30.0
def get_reference_catalog(self, ra, dec, radius, overwrite_select=False):
""" Read i fits table from local catalog copy. Concatenate tables columns
from different fits tables for full coverage.
"""
# A lot of safeguarding boiler plate to ensure catalog files are valid.
if (self.basedir is None) or (not os.path.isdir(self.basedir)):
_logger.error("Unable to find reference catalog: %s" % (str(self.basedir)))
return None
# Load the SkyTable so we know in what files to look for the catalog"
_logger.debug("Using catalog found in %s" % (self.basedir))
skytable_filename = "%s/SkyTable.fits" % (self.basedir)
if (not os.path.isfile(skytable_filename)):
_logger.fatal("Unable to find catalog index file in %s!" % (self.basedir))
return None
# Read in the master index hdu
skytable_hdu = fits.open(skytable_filename)
skytable = skytable_hdu['SKY_REGION'].data
# Select entries that match our list
# print ra, dec, radius, type(ra), type(dec), type(radius)
# logger.debug("# Searching for stars within %.1f degress around %f , %f ..." % (radius, ra, dec))
if (not radius == None and radius > 0):
min_dec = dec - radius
max_dec = dec + radius
min_ra = ra - radius / math.cos(math.radians(dec))
max_ra = ra + radius / math.cos(math.radians(dec))
else:
min_dec, max_dec = dec[0], dec[1]
min_ra, max_ra = ra[0], ra[1]
_logger.debug("Querying catalog: Ra=%f...%f Dec=%f...%f" % (min_ra, max_ra, min_dec, max_dec))
if (max_ra > 360.):
# This wraps around the high end, shift all ra values by -180
# Now all search RAs are ok and around the 180, next also move the catalog values
selected = skytable['R_MIN'] < 180
skytable['R_MAX'][selected] += 360
skytable['R_MIN'][selected] += 360
if (min_ra < 0):
# Wrap around at the low end
selected = skytable['R_MAX'] > 180
skytable['R_MAX'][selected] -= 360
skytable['R_MIN'][selected] -= 360
_logger.debug("# Search radius: RA=%.1f ... %.1f DEC=%.1f ... %.1f" % (min_ra, max_ra, min_dec, max_dec))
try:
needed_catalogs = (skytable['PARENT'] > 0) & (skytable['PARENT'] < 25) & \
(skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
except KeyError:
# try without the PARENT field
needed_catalogs = (skytable['R_MAX'] > min_ra) & (skytable['R_MIN'] < max_ra) & \
(skytable['D_MAX'] > min_dec) & (skytable['D_MIN'] < max_dec)
# print skytable[needed_catalogs]
files_to_read = skytable['NAME'][needed_catalogs]
files_to_read = [f.strip() for f in files_to_read]
_logger.debug(files_to_read)
skytable_hdu.close() # Warning: might erase the loaded data, might need to copy array!
# Now quickly go over the list and take care of all filenames that still have a 0x00 in them
for i in range(len(files_to_read)):
found_at = files_to_read[i].find('\0')
if (found_at > 0):
files_to_read[i] = files_to_read[i][:found_at]
# Load all frames, one by one, and select all stars in the valid range.
# Then add them to the catalog with RAs and DECs
full_catalog = None # numpy.zeros(shape=(0,6))
catalog_filenames = []
# Start iterating though catalogs
for catalogname in files_to_read:
catalogfile = "%s/%s" % (self.basedir, catalogname)
# print catalogfile
if (not os.path.isfile(catalogfile)):
# not a file, try adding .fits to the end of the filename
if (os.path.isfile(catalogfile + ".fits")):
catalogfile += ".fits"
else:
# neither option (w/ or w/o .fits added is a file)
_logger.warning(
"Catalog file (%s) not found (base-dir: %s)" % (os.path.abspath(catalogfile), self.basedir))
continue
try:
hdu_cat = fits.open(catalogfile)
except:
_logger.warning("Unable to open catalog file %s" % (catalogfile))
continue
catalog_filenames.append(catalogfile)
_logger.debug("Adding %s to list of catalog files being used" % (catalogfile))
# read table into a nd-array buffer
cat_full = hdu_cat[1].data
hdu_cat.close()
# Read the RA and DEC values
cat_ra = cat_full['RA']
cat_dec = cat_full['DEC']
# To select the right region, shift a temporary catalog
cat_ra_shifted = cat_ra
if (max_ra > 360.):
cat_ra_shifted[cat_ra < 180] += 360
elif (min_ra < 0):
cat_ra_shifted[cat_ra > 180] -= 360
select_from_cat = (cat_ra_shifted > min_ra) & (cat_ra_shifted < max_ra) & (cat_dec > min_dec) & (
cat_dec < max_dec)
array_to_add = cat_full[select_from_cat]
_logger.debug("Read %d sources from %s" % (array_to_add.shape[0], catalogname))
if (full_catalog is None):
full_catalog = array_to_add
else:
full_catalog = np.append(full_catalog, array_to_add, axis=0)
# print photom_grizy[:3,:]
if (full_catalog is None):
_logger.warning("No stars found in area %s, %s from catalog %s" % (
str(ra), str(dec),
# ra[0], ra[1], dec[0], dec[1],
self.basedir))
else:
_logger.debug(
"Read a total of %d stars from %d catalogs!" % (full_catalog.shape[0], len(files_to_read)))
self.PS1toSDSS(full_catalog)
return full_catalog
#### Wrapper routines to use photometric zeropointing stand-alone
def | (directory, db, args):
search = "%s/*-[es][19]1.fits.fz" % (directory)
inputlist = glob.glob(search)
initialsize = len (inputlist)
rejects = []
if not args.redo:
for image in inputlist:
if db.exists(image):
rejects.append (image)
for r in rejects:
inputlist.remove (r)
print ("Found %d files intially, but cleaned %d already measured images. Starting analysis of %d files" % (initialsize, len(rejects), len(inputlist)))
photzpStage = PhotCalib(args.ps1dir)
for image in inputlist:
image = image.rstrip()
photzpStage.analyzeImage(image, outputdb=db, outputimageRootDir=args.outputimageRootDir, mintexp=args.mintexp)
def crawlSiteCameraArchive(site, camera, args, date=None):
'''
Process in the archive
:param site:
:param camera:
:param args:
:param date:
:return:
'''
if date is None:
date = '*'
if site is None:
_logger.error ("Must define a site !")
exit (1)
imagedb = photdbinterface(args.imagedbPrefix)
searchdir = "%s/%s/%s/%s/%s" % (args.rootdir, site, camera, date, args.processstatus)
# search = "%s/%s/%s/%s/preview/*-[es]11.fits.fz" % (args.rootdir, site, camera, date)
_logger.info("Searching in directories: %s" % (searchdir))
crawlDirectory(searchdir, imagedb, args)
imagedb.close();
def crawlSite(site, type, args):
""" Search for all cameras of a given type (fl, kb,fs) in a site directory and processed them """
searchdir = "%s/%s/%s*" % (args.rootdir, site, type)
cameralist = glob.glob(searchdir)
cameras = []
for candidate in cameralist:
cameras.append((site, os.path.basename(os.path.normpath(candidate))))
for setup in cameras:
for date in args.date:
print(setup[0], setup[1], date)
crawlSiteCameraArchive(setup[0], setup[1], args, date)
def parseCommandLine():
""" Read command line parameters
"""
parser = argparse.ArgumentParser(
description='Determine photometric zeropoint of banzai-reduced LCO imaging data.')
parser.add_argument('--log-level', dest='log_level', default='INFO', choices=['DEBUG', 'INFO'],
help='Set the log level')
parser.add_argument('--ps1dir', dest='ps1dir', default='~/Catalogs/ps1odi/panstarrs/',
help='Directory of PS1 catalog')
parser.add_argument("--diagnosticplotsdir", dest='outputimageRootDir', default=None,
help='Output directory for diagnostic photometry plots. No plots generated if option is omitted. This is a time consuming task. ')
parser.add_argument('--photodb', dest='imagedbPrefix', default='~/lcozpplots/lcophotzp.db',
help='Result output directory. .db file is written here')
parser.add_argument('--imagerootdir', dest='rootdir', default='/archive/engineering',
help="LCO archive root directory")
parser.add_argument('--site', dest='site', default=None, help='sites code for camera')
parser.add_argument('--mintexp', dest='mintexp', default=60, type=float, help='Minimum exposure time to accept')
parser.add_argument('--redo', action='store_true')
parser.add_argument ('--preview', dest='processstatus', default='processed', action='store_const', const='preview')
mutex = parser.add_mutually_exclusive_group()
mutex.add_argument('--date', dest='date', default=[None,], nargs='+', help='Specific date to process.')
mutex.add_argument('--lastNdays', type=int)
cameragroup = parser.add_mutually_exclusive_group()
cameragroup.add_argument('--camera', dest='camera', default=None, help='specific camera to process. ')
cameragroup.add_argument('--cameratype', dest='cameratype', default=None, choices=['fs', 'fl', 'kb'],
help='camera type to process at selected sites to process. ')
cameragroup.add_argument('--crawldirectory', default=None, type=str,
help="process all reduced image in specific directoy")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level.upper()),
format='%(asctime)s.%(msecs).03d %(levelname)7s: %(module)20s: %(message)s')
args.imagedbPrefix = os.path.expanduser(args.imagedbPrefix)
if args.outputimageRootDir is not None:
args.outputimageRootDir = os.path.expanduser(args.outputimageRootDir)
print ("Writing db to directory: %s" % args.outputimageRootDir)
if args.crawldirectory is not None:
args.crawldirectory = os.path.expanduser(args.crawldirectory)
if (args.lastNdays is not None):
args.date=[]
today = datetime.datetime.utcnow()
for ii in range (args.lastNdays):
day = today - datetime.timedelta(days=ii)
args.date.append (day.strftime("%Y%m%d"))
args.date = args.date[::-1]
args.ps1dir = os.path.expanduser(args.ps1dir)
print (args.processstatus)
return args
def photzpmain():
args = parseCommandLine()
if args.cameratype is not None:
cameras = [camera for camera in args.cameratype.split(',')]
if args.site is not None:
sites = [site for site in args.site.split(',')]
else:
sites = ('lsc', 'cpt', 'ogg', 'coj', 'tfn', 'elp', 'sqa', 'bpl')
print("Crawling through camera types ", cameras, " at sites ", sites, " for date ", args.date)
for site in sites:
for cameratype in cameras:
crawlSite(site, cameratype, args)
elif args.camera is not None:
if args.site is None:
sites = '*'
else:
sites = args.site
print("Calibrating camera ", args.camera, " at site ", sites, ' for date ', args.date)
for date in args.date:
crawlSiteCameraArchive(sites, args.camera, args, date=date)
elif args.crawldirectory is not None:
imagedb = photdbinterface("%s/%s" % (args.crawldirectory, 'imagezp.db'))
crawlDirectory(args.crawldirectory, imagedb, args)
imagedb.close()
else:
print("Need to specify either a camera, or a camera type.")
sys.exit(0)
if __name__ == '__main__':
assert sys.version_info >= (3,5)
photzpmain()
| crawlDirectory | identifier_name |
genetic.py | import copy
import random
import time
import sys
# TODO: accept user args for states, give up, etc
# TODO: set up signal handler for Ctrl-C
DEBUG = True
history = [] # a record of states
GIVE_UP = 1000 # give up after x iterations
POOL_SIZE = 10
NUM_VARS = 0
NUM_CLAUSES = 0
CNF = [] #TODO: fix evaluate function
def find_solution(variables, cnf, start_states=None, rand_state=None, rand_restarts=False, tries=None, tb=False):
global NUM_VARS
global NUM_CLAUSES
global CNF
global GIVE_UP
NUM_VARS = variables
NUM_CLAUSES = len(cnf)
CNF = cnf
#GIVE_UP = NUM_VARS**2 # 20->400, 50->2500, 100->10k
GiVE_UP = tries
if not tries:
tries = 5*NUM_VARS
if rand_state != None:
random.seed(rand_state)
gene_pool = ["" for i in range(POOL_SIZE)]
initialize_states(gene_pool, start_states) # random starting states
if DEBUG: print(gene_pool)
counter = 0 | flips = 0
while counter < GIVE_UP:
if rand_restarts and counter>0 and not (counter % restart): # random restarts
if not tb: print("restarting: (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) + ")")
initialize_states(gene_pool, gene_pool[0])
if not tb: print("iteration", counter, "\t\t", gene_pool[0])
new_pool = []
evaluations = eval_pool(gene_pool)
elite_states = add_elite(gene_pool, new_pool, evaluations)
select(gene_pool, new_pool, evaluations)
crossover(2, new_pool)
mutate(2, new_pool)
new_vals = eval_pool(new_pool)
flips += flip_heuristic2(2, new_pool, new_vals)
#record(gene_pool) # records chnges in pool to history
if False and DEBUG:
print(counter, "old:",gene_pool, evaluations)
print(counter, "new:",new_pool, new_vals)
gene_pool = new_pool
if evaluate(gene_pool[0]) == NUM_CLAUSES:
if not tb: # test bench
print(">> PROBLEM SATISFIED at iteration " + str(counter))
print(">> With solution:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
return (0, flips)
counter += 1
if not tb: # test bench
print(">> GAVE UP after " + str(GIVE_UP) + " tries.")
print(">> Current Best:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
print("UNSATISFIED CLAUSES: (1-indexed)")
for i in range(len(CNF)):
if not satisfied(CNF[i], gene_pool[0]):
print(str(i+1) + ":\t", CNF[i])
return (1, flips)
def readable(string):
new_str = ""
for i in range(len(string)):
if i%5 == 0: new_str += " "
new_str += string[i]
return new_str
def flip_coin(p=.5):
if (p < 0 or p > 1):
raise ValueError("p can only be between 0 and 1.")
return int(random.random() < p)
def sample(probs, selections):
samples = []
cdf = []
for i in range(len(probs)-1):
cdf.append(sum(probs[:i+1]))
cdf.append(1)
while len(samples) < selections:
p = random.random()
i = 0
while(p >= cdf[i]):
i += 1
samples.append(i)
return samples
def print_pool(gene_pool):
for i in range(len(gene_pool)):
print(gene_pool[i])
def record(gene_pool):
history.append(copy.deepcopy(gene_pool))
def initialize_states(gene_pool, given=None):
if given and (len(given)<len(gene_pool)):
for i in range(len(given)):
gene_pool[i] = given[i]
for i in range(len(given), POOL_SIZE):
gene_pool[i] = create_state()
else:
for i in range(POOL_SIZE):
gene_pool[i] = create_state()
def create_state():
state = ""
for i in range(NUM_VARS):
state += str(int(flip_coin()))
return state
#TODO: to be replaced with cnf evaluation function for sat problems
'''
def evaluate(state):
#result = 0
#for i in range(NUM_CLAUSES):
# if state[i] == goal[i]:
# result += 1
#return result
return cnf_eval(CNF, state)
'''
def eval_pool(pool):
evaluations = []
for i in range(len(pool)):
evaluations.append(evaluate(pool[i]))
return evaluations
def add_elite(old_pool, new_pool, evaluations):
best1 = old_pool[0]
b1_score = evaluations[0]
best2 = best1 # keep the same for now
b2_score = b1_score
for i in range(1, POOL_SIZE):
if evaluations[i] >= b1_score: # shuffles around ties
best2 = best1
b2_score = b1_score
best1 = old_pool[i]
b1_score = evaluations[i]
new_pool.append(best1)
new_pool.append(best2)
def select(gene_pool, new_pool, evaluations):
probs = [0 for i in range(POOL_SIZE)]
selections = POOL_SIZE - len(new_pool)
denom = sum(evaluations)
for i in range(POOL_SIZE):
probs[i] = evaluations[i]/denom
result = sample(probs, selections)
for i in range(selections):
new_pool.append(gene_pool[result[i]])
def crossover(safe, new_pool):
for i in range(safe, POOL_SIZE, 2):
a, b = cross(new_pool[i], new_pool[i+1])
new_pool[i] = a
new_pool[i+1] = b
def cross(x, y):
guide = ""
for i in range(len(x)):
guide += str(int(flip_coin()))
c1 = ""
c2 = ""
for i in range(len(x)):
if guide[i] == "1":
c1 += x[i]
c2 += y[i]
else:
c1 += y[i]
c2 += x[i]
return (c1, c2)
def mutate(safe, new_pool):
for i in range(safe, POOL_SIZE):
if flip_coin(.9):
mutant = ""
for j in range(len(new_pool[i])):
if flip_coin():
mutant += str(1 - int(new_pool[i][j]))
else:
mutant += new_pool[i][j]
new_pool[i] = mutant
# the actual flip heuristic (local changes)
# now, should instantly solve convex optimizations
def flip_heuristic2(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
improvement = True
order = [j for j in range(NUM_VARS)]
flips = 0
while improvement: # keeps going as long as there is improvement
improvement = False
random.shuffle(order)
for j in order:
new_str, new_eval = eval_flip(new_pool[i], j, evaluations[i])
flips +=1
if new_str: # eval flip returns None if not better
new_pool[i] = new_str
evaluations[i] = new_eval
#improvement = True
return flips
def eval_flip(string, index, evaluation):
# flipping bit at index i
new_str = string[:index] + ("1" if string[index]=="0" else "0") + string[(index+1):]
new_eval = evaluate(new_str)
if new_eval > evaluation:
return (new_str, new_eval)
return (None, None)
# not currently using
def flip_heuristic(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
flipped = flip_bits(new_pool[i])
value = evaluate(flipped)
if value >= evaluations[i]:
evaluations[i] = value
new_pool[i] = flipped
def flip_bits(string):
new_str = ""
for i in range(NUM_VARS):
new_str += "1" if string[i]=="0" else "0"
return new_str
# right now, assuming that lines end with zero, one clause per line
def read_cnf(file_name):
f = open(file_name, "r")
lines = f.read().splitlines()
variables = 0
clauses = 0
for i in range(len(lines)):
if lines[i][0] == 'p': # found problem line
variables, clauses = map(int, lines[i].split()[2:])
if DEBUG: print(variables, clauses)
lines = lines[(i+1):]
break;
cnf = []
for i in range(clauses):
cnf.append(list(map(int, lines[i].split())))
for clause in cnf: # removes 0 from the end (assumption)
if clause[-1] == 0:
clause.pop()
return (variables, cnf)
def cnf_eval(cnf, state):
sat_clauses = 0
for i in range(len(cnf)):
sat = satisfied(cnf[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
def evaluate(state):
sat_clauses = 0
for i in range(NUM_CLAUSES):
sat = satisfied(CNF[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
# simple, doesn't tell you how satisfied the clause is (don't think that matters)
def satisfied(clause, state):
#for i in range(len(clause)):
for i in range(3):
temp = -clause[i] if (clause[i] < 0) else clause[i]
# (if the variable is true) != (if the variable is negated)
truthy = (state[temp-1]=="1") != (temp==clause[i])
if truthy:
return True
return False
######
def generate_tsp(cities, rand_state=None):
if rand_state != None:
random.seed(rand_state)
l = [] # list
c_list = set()
rand = random.randrange # localizing function
for i in range(cities):
while True:
x = rand(100)
y = rand(100)
if (x,y) not in c_list:
break
l.append((i, (x, y)))
c_list.add((x,y))
return l
def init_adj(cities): # create adjacency matrix
num = len(cities)
inf = float('inf')
res = [[0 for i in range(num)] for j in range(num)]
for i in range(num):
for j in range(num):
res[i][j] = calc_dist(cities[i][1], cities[j][1])
res[j][i] = res[i][j]
for i in range(num):
res[i][i] = inf
return res
def calc_dist(a, b):
return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**.5
def closest(a, adj):
return min(adj[a][:])
def find_closest(a, others, adj):
min_dist = 1000
closest = -1
for i in others:
if adj[a][i] < min_dist:
min_dist = adj[a][i]
closest = i
return closest
def local_tsp(cities, adj):
return
if __name__ == '__main__':
import cProfile
import pstats
#variables, cnf = read_cnf("uf50-218/uf50-01.cnf")
variables, cnf = read_cnf("uf100-430/uf100-01.cnf")
pr = cProfile.Profile()
pr.enable()
#start_t = time.clock()
find_solution(variables, cnf, start_states=None, rand_state=0, rand_restarts=True)
#stop_t = time.clock()
#print("TIME ELAPSED: " + str(stop_t - start_t) + " seconds.")
pr.disable()
pstats.Stats(pr).print_stats() | restart = int(tries/5) | random_line_split |
genetic.py | import copy
import random
import time
import sys
# TODO: accept user args for states, give up, etc
# TODO: set up signal handler for Ctrl-C
DEBUG = True
history = [] # a record of states
GIVE_UP = 1000 # give up after x iterations
POOL_SIZE = 10
NUM_VARS = 0
NUM_CLAUSES = 0
CNF = [] #TODO: fix evaluate function
def find_solution(variables, cnf, start_states=None, rand_state=None, rand_restarts=False, tries=None, tb=False):
global NUM_VARS
global NUM_CLAUSES
global CNF
global GIVE_UP
NUM_VARS = variables
NUM_CLAUSES = len(cnf)
CNF = cnf
#GIVE_UP = NUM_VARS**2 # 20->400, 50->2500, 100->10k
GiVE_UP = tries
if not tries:
tries = 5*NUM_VARS
if rand_state != None:
random.seed(rand_state)
gene_pool = ["" for i in range(POOL_SIZE)]
initialize_states(gene_pool, start_states) # random starting states
if DEBUG: print(gene_pool)
counter = 0
restart = int(tries/5)
flips = 0
while counter < GIVE_UP:
if rand_restarts and counter>0 and not (counter % restart): # random restarts
if not tb: print("restarting: (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) + ")")
initialize_states(gene_pool, gene_pool[0])
if not tb: print("iteration", counter, "\t\t", gene_pool[0])
new_pool = []
evaluations = eval_pool(gene_pool)
elite_states = add_elite(gene_pool, new_pool, evaluations)
select(gene_pool, new_pool, evaluations)
crossover(2, new_pool)
mutate(2, new_pool)
new_vals = eval_pool(new_pool)
flips += flip_heuristic2(2, new_pool, new_vals)
#record(gene_pool) # records chnges in pool to history
if False and DEBUG:
print(counter, "old:",gene_pool, evaluations)
print(counter, "new:",new_pool, new_vals)
gene_pool = new_pool
if evaluate(gene_pool[0]) == NUM_CLAUSES:
if not tb: # test bench
print(">> PROBLEM SATISFIED at iteration " + str(counter))
print(">> With solution:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
return (0, flips)
counter += 1
if not tb: # test bench
print(">> GAVE UP after " + str(GIVE_UP) + " tries.")
print(">> Current Best:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
print("UNSATISFIED CLAUSES: (1-indexed)")
for i in range(len(CNF)):
if not satisfied(CNF[i], gene_pool[0]):
print(str(i+1) + ":\t", CNF[i])
return (1, flips)
def readable(string):
new_str = ""
for i in range(len(string)):
if i%5 == 0: new_str += " "
new_str += string[i]
return new_str
def flip_coin(p=.5):
if (p < 0 or p > 1):
raise ValueError("p can only be between 0 and 1.")
return int(random.random() < p)
def sample(probs, selections):
samples = []
cdf = []
for i in range(len(probs)-1):
cdf.append(sum(probs[:i+1]))
cdf.append(1)
while len(samples) < selections:
p = random.random()
i = 0
while(p >= cdf[i]):
i += 1
samples.append(i)
return samples
def print_pool(gene_pool):
for i in range(len(gene_pool)):
print(gene_pool[i])
def record(gene_pool):
history.append(copy.deepcopy(gene_pool))
def initialize_states(gene_pool, given=None):
if given and (len(given)<len(gene_pool)):
for i in range(len(given)):
gene_pool[i] = given[i]
for i in range(len(given), POOL_SIZE):
gene_pool[i] = create_state()
else:
for i in range(POOL_SIZE):
gene_pool[i] = create_state()
def create_state():
state = ""
for i in range(NUM_VARS):
state += str(int(flip_coin()))
return state
#TODO: to be replaced with cnf evaluation function for sat problems
'''
def evaluate(state):
#result = 0
#for i in range(NUM_CLAUSES):
# if state[i] == goal[i]:
# result += 1
#return result
return cnf_eval(CNF, state)
'''
def eval_pool(pool):
evaluations = []
for i in range(len(pool)):
evaluations.append(evaluate(pool[i]))
return evaluations
def add_elite(old_pool, new_pool, evaluations):
best1 = old_pool[0]
b1_score = evaluations[0]
best2 = best1 # keep the same for now
b2_score = b1_score
for i in range(1, POOL_SIZE):
if evaluations[i] >= b1_score: # shuffles around ties
best2 = best1
b2_score = b1_score
best1 = old_pool[i]
b1_score = evaluations[i]
new_pool.append(best1)
new_pool.append(best2)
def select(gene_pool, new_pool, evaluations):
probs = [0 for i in range(POOL_SIZE)]
selections = POOL_SIZE - len(new_pool)
denom = sum(evaluations)
for i in range(POOL_SIZE):
probs[i] = evaluations[i]/denom
result = sample(probs, selections)
for i in range(selections):
new_pool.append(gene_pool[result[i]])
def crossover(safe, new_pool):
for i in range(safe, POOL_SIZE, 2):
a, b = cross(new_pool[i], new_pool[i+1])
new_pool[i] = a
new_pool[i+1] = b
def cross(x, y):
guide = ""
for i in range(len(x)):
guide += str(int(flip_coin()))
c1 = ""
c2 = ""
for i in range(len(x)):
if guide[i] == "1":
c1 += x[i]
c2 += y[i]
else:
c1 += y[i]
c2 += x[i]
return (c1, c2)
def mutate(safe, new_pool):
for i in range(safe, POOL_SIZE):
if flip_coin(.9):
mutant = ""
for j in range(len(new_pool[i])):
if flip_coin():
mutant += str(1 - int(new_pool[i][j]))
else:
mutant += new_pool[i][j]
new_pool[i] = mutant
# the actual flip heuristic (local changes)
# now, should instantly solve convex optimizations
def flip_heuristic2(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
improvement = True
order = [j for j in range(NUM_VARS)]
flips = 0
while improvement: # keeps going as long as there is improvement
improvement = False
random.shuffle(order)
for j in order:
new_str, new_eval = eval_flip(new_pool[i], j, evaluations[i])
flips +=1
if new_str: # eval flip returns None if not better
new_pool[i] = new_str
evaluations[i] = new_eval
#improvement = True
return flips
def eval_flip(string, index, evaluation):
# flipping bit at index i
|
# not currently using
def flip_heuristic(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
flipped = flip_bits(new_pool[i])
value = evaluate(flipped)
if value >= evaluations[i]:
evaluations[i] = value
new_pool[i] = flipped
def flip_bits(string):
new_str = ""
for i in range(NUM_VARS):
new_str += "1" if string[i]=="0" else "0"
return new_str
# right now, assuming that lines end with zero, one clause per line
def read_cnf(file_name):
f = open(file_name, "r")
lines = f.read().splitlines()
variables = 0
clauses = 0
for i in range(len(lines)):
if lines[i][0] == 'p': # found problem line
variables, clauses = map(int, lines[i].split()[2:])
if DEBUG: print(variables, clauses)
lines = lines[(i+1):]
break;
cnf = []
for i in range(clauses):
cnf.append(list(map(int, lines[i].split())))
for clause in cnf: # removes 0 from the end (assumption)
if clause[-1] == 0:
clause.pop()
return (variables, cnf)
def cnf_eval(cnf, state):
sat_clauses = 0
for i in range(len(cnf)):
sat = satisfied(cnf[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
def evaluate(state):
sat_clauses = 0
for i in range(NUM_CLAUSES):
sat = satisfied(CNF[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
# simple, doesn't tell you how satisfied the clause is (don't think that matters)
def satisfied(clause, state):
#for i in range(len(clause)):
for i in range(3):
temp = -clause[i] if (clause[i] < 0) else clause[i]
# (if the variable is true) != (if the variable is negated)
truthy = (state[temp-1]=="1") != (temp==clause[i])
if truthy:
return True
return False
######
def generate_tsp(cities, rand_state=None):
if rand_state != None:
random.seed(rand_state)
l = [] # list
c_list = set()
rand = random.randrange # localizing function
for i in range(cities):
while True:
x = rand(100)
y = rand(100)
if (x,y) not in c_list:
break
l.append((i, (x, y)))
c_list.add((x,y))
return l
def init_adj(cities): # create adjacency matrix
num = len(cities)
inf = float('inf')
res = [[0 for i in range(num)] for j in range(num)]
for i in range(num):
for j in range(num):
res[i][j] = calc_dist(cities[i][1], cities[j][1])
res[j][i] = res[i][j]
for i in range(num):
res[i][i] = inf
return res
def calc_dist(a, b):
return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**.5
def closest(a, adj):
return min(adj[a][:])
def find_closest(a, others, adj):
min_dist = 1000
closest = -1
for i in others:
if adj[a][i] < min_dist:
min_dist = adj[a][i]
closest = i
return closest
def local_tsp(cities, adj):
return
if __name__ == '__main__':
import cProfile
import pstats
#variables, cnf = read_cnf("uf50-218/uf50-01.cnf")
variables, cnf = read_cnf("uf100-430/uf100-01.cnf")
pr = cProfile.Profile()
pr.enable()
#start_t = time.clock()
find_solution(variables, cnf, start_states=None, rand_state=0, rand_restarts=True)
#stop_t = time.clock()
#print("TIME ELAPSED: " + str(stop_t - start_t) + " seconds.")
pr.disable()
pstats.Stats(pr).print_stats()
| new_str = string[:index] + ("1" if string[index]=="0" else "0") + string[(index+1):]
new_eval = evaluate(new_str)
if new_eval > evaluation:
return (new_str, new_eval)
return (None, None) | identifier_body |
genetic.py | import copy
import random
import time
import sys
# TODO: accept user args for states, give up, etc
# TODO: set up signal handler for Ctrl-C
DEBUG = True
history = [] # a record of states
GIVE_UP = 1000 # give up after x iterations
POOL_SIZE = 10
NUM_VARS = 0
NUM_CLAUSES = 0
CNF = [] #TODO: fix evaluate function
def find_solution(variables, cnf, start_states=None, rand_state=None, rand_restarts=False, tries=None, tb=False):
global NUM_VARS
global NUM_CLAUSES
global CNF
global GIVE_UP
NUM_VARS = variables
NUM_CLAUSES = len(cnf)
CNF = cnf
#GIVE_UP = NUM_VARS**2 # 20->400, 50->2500, 100->10k
GiVE_UP = tries
if not tries:
tries = 5*NUM_VARS
if rand_state != None:
random.seed(rand_state)
gene_pool = ["" for i in range(POOL_SIZE)]
initialize_states(gene_pool, start_states) # random starting states
if DEBUG: print(gene_pool)
counter = 0
restart = int(tries/5)
flips = 0
while counter < GIVE_UP:
if rand_restarts and counter>0 and not (counter % restart): # random restarts
if not tb: print("restarting: (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) + ")")
initialize_states(gene_pool, gene_pool[0])
if not tb: print("iteration", counter, "\t\t", gene_pool[0])
new_pool = []
evaluations = eval_pool(gene_pool)
elite_states = add_elite(gene_pool, new_pool, evaluations)
select(gene_pool, new_pool, evaluations)
crossover(2, new_pool)
mutate(2, new_pool)
new_vals = eval_pool(new_pool)
flips += flip_heuristic2(2, new_pool, new_vals)
#record(gene_pool) # records chnges in pool to history
if False and DEBUG:
print(counter, "old:",gene_pool, evaluations)
print(counter, "new:",new_pool, new_vals)
gene_pool = new_pool
if evaluate(gene_pool[0]) == NUM_CLAUSES:
if not tb: # test bench
print(">> PROBLEM SATISFIED at iteration " + str(counter))
print(">> With solution:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
return (0, flips)
counter += 1
if not tb: # test bench
print(">> GAVE UP after " + str(GIVE_UP) + " tries.")
print(">> Current Best:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
print("UNSATISFIED CLAUSES: (1-indexed)")
for i in range(len(CNF)):
if not satisfied(CNF[i], gene_pool[0]):
print(str(i+1) + ":\t", CNF[i])
return (1, flips)
def readable(string):
new_str = ""
for i in range(len(string)):
if i%5 == 0: new_str += " "
new_str += string[i]
return new_str
def flip_coin(p=.5):
if (p < 0 or p > 1):
raise ValueError("p can only be between 0 and 1.")
return int(random.random() < p)
def sample(probs, selections):
samples = []
cdf = []
for i in range(len(probs)-1):
cdf.append(sum(probs[:i+1]))
cdf.append(1)
while len(samples) < selections:
p = random.random()
i = 0
while(p >= cdf[i]):
i += 1
samples.append(i)
return samples
def print_pool(gene_pool):
for i in range(len(gene_pool)):
print(gene_pool[i])
def record(gene_pool):
history.append(copy.deepcopy(gene_pool))
def initialize_states(gene_pool, given=None):
if given and (len(given)<len(gene_pool)):
for i in range(len(given)):
gene_pool[i] = given[i]
for i in range(len(given), POOL_SIZE):
gene_pool[i] = create_state()
else:
for i in range(POOL_SIZE):
gene_pool[i] = create_state()
def create_state():
state = ""
for i in range(NUM_VARS):
state += str(int(flip_coin()))
return state
#TODO: to be replaced with cnf evaluation function for sat problems
'''
def evaluate(state):
#result = 0
#for i in range(NUM_CLAUSES):
# if state[i] == goal[i]:
# result += 1
#return result
return cnf_eval(CNF, state)
'''
def eval_pool(pool):
evaluations = []
for i in range(len(pool)):
evaluations.append(evaluate(pool[i]))
return evaluations
def add_elite(old_pool, new_pool, evaluations):
best1 = old_pool[0]
b1_score = evaluations[0]
best2 = best1 # keep the same for now
b2_score = b1_score
for i in range(1, POOL_SIZE):
if evaluations[i] >= b1_score: # shuffles around ties
best2 = best1
b2_score = b1_score
best1 = old_pool[i]
b1_score = evaluations[i]
new_pool.append(best1)
new_pool.append(best2)
def select(gene_pool, new_pool, evaluations):
probs = [0 for i in range(POOL_SIZE)]
selections = POOL_SIZE - len(new_pool)
denom = sum(evaluations)
for i in range(POOL_SIZE):
probs[i] = evaluations[i]/denom
result = sample(probs, selections)
for i in range(selections):
new_pool.append(gene_pool[result[i]])
def crossover(safe, new_pool):
for i in range(safe, POOL_SIZE, 2):
a, b = cross(new_pool[i], new_pool[i+1])
new_pool[i] = a
new_pool[i+1] = b
def cross(x, y):
guide = ""
for i in range(len(x)):
guide += str(int(flip_coin()))
c1 = ""
c2 = ""
for i in range(len(x)):
if guide[i] == "1":
c1 += x[i]
c2 += y[i]
else:
c1 += y[i]
c2 += x[i]
return (c1, c2)
def mutate(safe, new_pool):
for i in range(safe, POOL_SIZE):
if flip_coin(.9):
mutant = ""
for j in range(len(new_pool[i])):
if flip_coin():
mutant += str(1 - int(new_pool[i][j]))
else:
mutant += new_pool[i][j]
new_pool[i] = mutant
# the actual flip heuristic (local changes)
# now, should instantly solve convex optimizations
def flip_heuristic2(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
improvement = True
order = [j for j in range(NUM_VARS)]
flips = 0
while improvement: # keeps going as long as there is improvement
improvement = False
random.shuffle(order)
for j in order:
new_str, new_eval = eval_flip(new_pool[i], j, evaluations[i])
flips +=1
if new_str: # eval flip returns None if not better
new_pool[i] = new_str
evaluations[i] = new_eval
#improvement = True
return flips
def eval_flip(string, index, evaluation):
# flipping bit at index i
new_str = string[:index] + ("1" if string[index]=="0" else "0") + string[(index+1):]
new_eval = evaluate(new_str)
if new_eval > evaluation:
return (new_str, new_eval)
return (None, None)
# not currently using
def flip_heuristic(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
flipped = flip_bits(new_pool[i])
value = evaluate(flipped)
if value >= evaluations[i]:
evaluations[i] = value
new_pool[i] = flipped
def flip_bits(string):
new_str = ""
for i in range(NUM_VARS):
new_str += "1" if string[i]=="0" else "0"
return new_str
# right now, assuming that lines end with zero, one clause per line
def read_cnf(file_name):
f = open(file_name, "r")
lines = f.read().splitlines()
variables = 0
clauses = 0
for i in range(len(lines)):
if lines[i][0] == 'p': # found problem line
variables, clauses = map(int, lines[i].split()[2:])
if DEBUG: print(variables, clauses)
lines = lines[(i+1):]
break;
cnf = []
for i in range(clauses):
cnf.append(list(map(int, lines[i].split())))
for clause in cnf: # removes 0 from the end (assumption)
if clause[-1] == 0:
clause.pop()
return (variables, cnf)
def cnf_eval(cnf, state):
sat_clauses = 0
for i in range(len(cnf)):
sat = satisfied(cnf[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
def evaluate(state):
sat_clauses = 0
for i in range(NUM_CLAUSES):
sat = satisfied(CNF[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
# simple, doesn't tell you how satisfied the clause is (don't think that matters)
def satisfied(clause, state):
#for i in range(len(clause)):
for i in range(3):
temp = -clause[i] if (clause[i] < 0) else clause[i]
# (if the variable is true) != (if the variable is negated)
truthy = (state[temp-1]=="1") != (temp==clause[i])
if truthy:
return True
return False
######
def generate_tsp(cities, rand_state=None):
if rand_state != None:
random.seed(rand_state)
l = [] # list
c_list = set()
rand = random.randrange # localizing function
for i in range(cities):
while True:
x = rand(100)
y = rand(100)
if (x,y) not in c_list:
break
l.append((i, (x, y)))
c_list.add((x,y))
return l
def init_adj(cities): # create adjacency matrix
num = len(cities)
inf = float('inf')
res = [[0 for i in range(num)] for j in range(num)]
for i in range(num):
|
for i in range(num):
res[i][i] = inf
return res
def calc_dist(a, b):
return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**.5
def closest(a, adj):
return min(adj[a][:])
def find_closest(a, others, adj):
min_dist = 1000
closest = -1
for i in others:
if adj[a][i] < min_dist:
min_dist = adj[a][i]
closest = i
return closest
def local_tsp(cities, adj):
return
if __name__ == '__main__':
import cProfile
import pstats
#variables, cnf = read_cnf("uf50-218/uf50-01.cnf")
variables, cnf = read_cnf("uf100-430/uf100-01.cnf")
pr = cProfile.Profile()
pr.enable()
#start_t = time.clock()
find_solution(variables, cnf, start_states=None, rand_state=0, rand_restarts=True)
#stop_t = time.clock()
#print("TIME ELAPSED: " + str(stop_t - start_t) + " seconds.")
pr.disable()
pstats.Stats(pr).print_stats()
| for j in range(num):
res[i][j] = calc_dist(cities[i][1], cities[j][1])
res[j][i] = res[i][j] | conditional_block |
genetic.py | import copy
import random
import time
import sys
# TODO: accept user args for states, give up, etc
# TODO: set up signal handler for Ctrl-C
DEBUG = True
history = [] # a record of states
GIVE_UP = 1000 # give up after x iterations
POOL_SIZE = 10
NUM_VARS = 0
NUM_CLAUSES = 0
CNF = [] #TODO: fix evaluate function
def find_solution(variables, cnf, start_states=None, rand_state=None, rand_restarts=False, tries=None, tb=False):
global NUM_VARS
global NUM_CLAUSES
global CNF
global GIVE_UP
NUM_VARS = variables
NUM_CLAUSES = len(cnf)
CNF = cnf
#GIVE_UP = NUM_VARS**2 # 20->400, 50->2500, 100->10k
GiVE_UP = tries
if not tries:
tries = 5*NUM_VARS
if rand_state != None:
random.seed(rand_state)
gene_pool = ["" for i in range(POOL_SIZE)]
initialize_states(gene_pool, start_states) # random starting states
if DEBUG: print(gene_pool)
counter = 0
restart = int(tries/5)
flips = 0
while counter < GIVE_UP:
if rand_restarts and counter>0 and not (counter % restart): # random restarts
if not tb: print("restarting: (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) + ")")
initialize_states(gene_pool, gene_pool[0])
if not tb: print("iteration", counter, "\t\t", gene_pool[0])
new_pool = []
evaluations = eval_pool(gene_pool)
elite_states = add_elite(gene_pool, new_pool, evaluations)
select(gene_pool, new_pool, evaluations)
crossover(2, new_pool)
mutate(2, new_pool)
new_vals = eval_pool(new_pool)
flips += flip_heuristic2(2, new_pool, new_vals)
#record(gene_pool) # records chnges in pool to history
if False and DEBUG:
print(counter, "old:",gene_pool, evaluations)
print(counter, "new:",new_pool, new_vals)
gene_pool = new_pool
if evaluate(gene_pool[0]) == NUM_CLAUSES:
if not tb: # test bench
print(">> PROBLEM SATISFIED at iteration " + str(counter))
print(">> With solution:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
return (0, flips)
counter += 1
if not tb: # test bench
print(">> GAVE UP after " + str(GIVE_UP) + " tries.")
print(">> Current Best:", readable(gene_pool[0]))
print(">> Satisfied (" + str(new_vals[0]) + "/" + str(NUM_CLAUSES) +") clauses.")
print("UNSATISFIED CLAUSES: (1-indexed)")
for i in range(len(CNF)):
if not satisfied(CNF[i], gene_pool[0]):
print(str(i+1) + ":\t", CNF[i])
return (1, flips)
def readable(string):
new_str = ""
for i in range(len(string)):
if i%5 == 0: new_str += " "
new_str += string[i]
return new_str
def flip_coin(p=.5):
if (p < 0 or p > 1):
raise ValueError("p can only be between 0 and 1.")
return int(random.random() < p)
def sample(probs, selections):
samples = []
cdf = []
for i in range(len(probs)-1):
cdf.append(sum(probs[:i+1]))
cdf.append(1)
while len(samples) < selections:
p = random.random()
i = 0
while(p >= cdf[i]):
i += 1
samples.append(i)
return samples
def print_pool(gene_pool):
for i in range(len(gene_pool)):
print(gene_pool[i])
def record(gene_pool):
history.append(copy.deepcopy(gene_pool))
def initialize_states(gene_pool, given=None):
if given and (len(given)<len(gene_pool)):
for i in range(len(given)):
gene_pool[i] = given[i]
for i in range(len(given), POOL_SIZE):
gene_pool[i] = create_state()
else:
for i in range(POOL_SIZE):
gene_pool[i] = create_state()
def create_state():
state = ""
for i in range(NUM_VARS):
state += str(int(flip_coin()))
return state
#TODO: to be replaced with cnf evaluation function for sat problems
'''
def evaluate(state):
#result = 0
#for i in range(NUM_CLAUSES):
# if state[i] == goal[i]:
# result += 1
#return result
return cnf_eval(CNF, state)
'''
def eval_pool(pool):
evaluations = []
for i in range(len(pool)):
evaluations.append(evaluate(pool[i]))
return evaluations
def add_elite(old_pool, new_pool, evaluations):
best1 = old_pool[0]
b1_score = evaluations[0]
best2 = best1 # keep the same for now
b2_score = b1_score
for i in range(1, POOL_SIZE):
if evaluations[i] >= b1_score: # shuffles around ties
best2 = best1
b2_score = b1_score
best1 = old_pool[i]
b1_score = evaluations[i]
new_pool.append(best1)
new_pool.append(best2)
def select(gene_pool, new_pool, evaluations):
probs = [0 for i in range(POOL_SIZE)]
selections = POOL_SIZE - len(new_pool)
denom = sum(evaluations)
for i in range(POOL_SIZE):
probs[i] = evaluations[i]/denom
result = sample(probs, selections)
for i in range(selections):
new_pool.append(gene_pool[result[i]])
def crossover(safe, new_pool):
for i in range(safe, POOL_SIZE, 2):
a, b = cross(new_pool[i], new_pool[i+1])
new_pool[i] = a
new_pool[i+1] = b
def cross(x, y):
guide = ""
for i in range(len(x)):
guide += str(int(flip_coin()))
c1 = ""
c2 = ""
for i in range(len(x)):
if guide[i] == "1":
c1 += x[i]
c2 += y[i]
else:
c1 += y[i]
c2 += x[i]
return (c1, c2)
def | (safe, new_pool):
for i in range(safe, POOL_SIZE):
if flip_coin(.9):
mutant = ""
for j in range(len(new_pool[i])):
if flip_coin():
mutant += str(1 - int(new_pool[i][j]))
else:
mutant += new_pool[i][j]
new_pool[i] = mutant
# the actual flip heuristic (local changes)
# now, should instantly solve convex optimizations
def flip_heuristic2(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
improvement = True
order = [j for j in range(NUM_VARS)]
flips = 0
while improvement: # keeps going as long as there is improvement
improvement = False
random.shuffle(order)
for j in order:
new_str, new_eval = eval_flip(new_pool[i], j, evaluations[i])
flips +=1
if new_str: # eval flip returns None if not better
new_pool[i] = new_str
evaluations[i] = new_eval
#improvement = True
return flips
def eval_flip(string, index, evaluation):
# flipping bit at index i
new_str = string[:index] + ("1" if string[index]=="0" else "0") + string[(index+1):]
new_eval = evaluate(new_str)
if new_eval > evaluation:
return (new_str, new_eval)
return (None, None)
# not currently using
def flip_heuristic(safe, new_pool, evaluations):
for i in range(safe, POOL_SIZE):
flipped = flip_bits(new_pool[i])
value = evaluate(flipped)
if value >= evaluations[i]:
evaluations[i] = value
new_pool[i] = flipped
def flip_bits(string):
new_str = ""
for i in range(NUM_VARS):
new_str += "1" if string[i]=="0" else "0"
return new_str
# right now, assuming that lines end with zero, one clause per line
def read_cnf(file_name):
f = open(file_name, "r")
lines = f.read().splitlines()
variables = 0
clauses = 0
for i in range(len(lines)):
if lines[i][0] == 'p': # found problem line
variables, clauses = map(int, lines[i].split()[2:])
if DEBUG: print(variables, clauses)
lines = lines[(i+1):]
break;
cnf = []
for i in range(clauses):
cnf.append(list(map(int, lines[i].split())))
for clause in cnf: # removes 0 from the end (assumption)
if clause[-1] == 0:
clause.pop()
return (variables, cnf)
def cnf_eval(cnf, state):
sat_clauses = 0
for i in range(len(cnf)):
sat = satisfied(cnf[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
def evaluate(state):
sat_clauses = 0
for i in range(NUM_CLAUSES):
sat = satisfied(CNF[i], state)
#if DEBUG: print(i, sat_clauses, "c:",cnf[i],"s:",state, sat)
if sat:
sat_clauses += 1
return sat_clauses
# simple, doesn't tell you how satisfied the clause is (don't think that matters)
def satisfied(clause, state):
#for i in range(len(clause)):
for i in range(3):
temp = -clause[i] if (clause[i] < 0) else clause[i]
# (if the variable is true) != (if the variable is negated)
truthy = (state[temp-1]=="1") != (temp==clause[i])
if truthy:
return True
return False
######
def generate_tsp(cities, rand_state=None):
if rand_state != None:
random.seed(rand_state)
l = [] # list
c_list = set()
rand = random.randrange # localizing function
for i in range(cities):
while True:
x = rand(100)
y = rand(100)
if (x,y) not in c_list:
break
l.append((i, (x, y)))
c_list.add((x,y))
return l
def init_adj(cities): # create adjacency matrix
num = len(cities)
inf = float('inf')
res = [[0 for i in range(num)] for j in range(num)]
for i in range(num):
for j in range(num):
res[i][j] = calc_dist(cities[i][1], cities[j][1])
res[j][i] = res[i][j]
for i in range(num):
res[i][i] = inf
return res
def calc_dist(a, b):
return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**.5
def closest(a, adj):
return min(adj[a][:])
def find_closest(a, others, adj):
min_dist = 1000
closest = -1
for i in others:
if adj[a][i] < min_dist:
min_dist = adj[a][i]
closest = i
return closest
def local_tsp(cities, adj):
return
if __name__ == '__main__':
import cProfile
import pstats
#variables, cnf = read_cnf("uf50-218/uf50-01.cnf")
variables, cnf = read_cnf("uf100-430/uf100-01.cnf")
pr = cProfile.Profile()
pr.enable()
#start_t = time.clock()
find_solution(variables, cnf, start_states=None, rand_state=0, rand_restarts=True)
#stop_t = time.clock()
#print("TIME ELAPSED: " + str(stop_t - start_t) + " seconds.")
pr.disable()
pstats.Stats(pr).print_stats()
| mutate | identifier_name |
Data_Exploration.py | from __future__ import print_function, division, unicode_literals
from datetime import date, datetime
import simplejson
from flask import Flask, request, jsonify
from www.archive.datasources import AsterixDataSource
from www.archive.datasources import SolrDataSource
from www.archive.datasources import convertToIn
from www.archive.middleware import VirtualIntegrationSchema
from www.archive.middleware import WebSession
#from sentiment_polarity import Sales_Reviews
import pandas as pd
from urllib import parse, request
import json
from json import loads
import psycopg2
from sqlalchemy import create_engine, text
import pysolr
from textblob import TextBlob as tb
app = Flask(__name__)
@app.route("/")
def Hello():
return "Hello World!"
@app.route('/api/service', methods=['POST'])
def api_service():
query = request.get_json(silent=True)
# needs to change to reading from xml file
xml = VirtualIntegrationSchema()
web_session = WebSession(xml)
return jsonify(web_session.get_result_sets(query))
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj))
@app.route("/api/web_method/<format>")
def api_web_method(format):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select *
from orderlines o, products p
where o.productid = p.productid
LIMIT 10
"""
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {}
for item in request.args:
|
l.append(d)
#
theresult_json = json.dumps(l, default=json_serial)
conn.close()
return theresult_json
@app.route("/api/correlation/<col1>/<col2>")
def Correlation(col1, col2):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select corr(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/covariance/<col1>/<col2>", methods=['GET'])
def Covariance(col1, col2):
"""Determine the covariance coefficient between two columns."""
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select covar_samp(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/histogram/<groupby>/<count>")
def Histogram(groupby, count):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT %s AS Group, count(%s) AS Count
FROM orders
Group by %s
Order by count(%s) DESC
""" % (groupby, count, groupby, count)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'Group': result[0], 'Count': result[1]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return theresult_json
@app.route('/api/add_message/<uuid>', methods=['GET', 'POST'])
def add_message(uuid):
content = request.get_json(silent=True)
print (content)
return jsonify('{"h" : "ok"}')
@app.route('/api/asterixwrap', methods=['GET'])
def api_asterixwrap():
sql = """USE AstxDB;
select * from TableBT v;
"""
ads = AsterixDataSource()
jsonobj = ads.execute(sql)
return jsonify(jsonobj)
def getNodeIds(category_list):
_where = ' OR '.join(['user.category.nested.nested.level_2 = "{0}"'.format(x) for x in category_list])
sql="""
use bookstore_dp;
select user.nodeID
from ClassificationInfo user
WHERE {0}
""".format(_where)
# where user.category.nested.nested.level_2 = "Education & Reference";
ads = AsterixDataSource(host="132.249.238.32")
jsonobj = ads.execute(sql)
return jsonobj
@app.route("/api/highest_monthly_sales_by_category/<list>")
def api_highest_monthly_sales_by_category(list):
category_list = list.split(",")
# print(category_list)
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
#engine = create_engine('postgresql+psycopg2://postgres@45.79.91.219/MyBookStore')
#
conn = engine.connect()
#
_jlist = getNodeIds(category_list) # will be replaced by asterix call once connected to DB - the result will not change though
print(_jlist)
_inStr = convertToIn(_jlist)
# print(_inStr)
#
sql = """
SELECT mon, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.productid, EXTRACT(MONTH from billdate)
order by p.productid
) monthlysales
WHERE category IN {0}
GROUP By mon
ORDER BY num_sold DESC
""".format(_inStr)
#
print(sql)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'mon': int(result[0]), 'num_sold' : int(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return (theresult_json)
@app.route('/api/solrwrap', methods=['GET'])
def api_solrwrap():
q = "*:*"
ads = SolrDataSource()
jsonobj = ads.execute(q)
return jsonify(jsonobj)
@app.route("/api/Top_Categories/<num_categories>/<months>")
def Top_categories(num_categories, months):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthlysales
WHERE mon in ({0})
GROUP BY category
ORDER BY num_sold DESC
LIMIT ({1})
""".format(months,num_categories)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0], 'num_sold': float(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Discontinue_Stocking/<threshold>/<startyear>/<endyear>")
def Discontinue_Stocking(threshold, startyear, endyear):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category
FROM (
select EXTRACT(YEAR from o.billdate) as yr,
p.nodeid as category, sum(o.numunits) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid , EXTRACT(YEAR from o.billdate)
order by p.nodeid
) yearly_sales
where books_sold < {0} AND (yr < {2} AND yr >= {1})
""".format(threshold,startyear, endyear)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Downward_Sales/<season>")
def Downware_Sales(season):
# return jsonify({ "n" : num_categories, "m" : months})
seasons = {'spring':(3,4,5),
'summer':(6,7,8),
'fall':(9,10,11),
'winter':(12,1,2)
}
seasontrend = seasons.get((season.lower()))
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT s.category, round(avg(s.change_in_sales_from_last_month)) AS sale_trend
FROM
(
SELECT category, mon, count(mon) OVER (PARTITION BY category) as num_months,
books_sold - lag(books_sold,1) over (PARTITION BY category ORDER BY mon) as change_in_sales_from_last_month
FROM( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthly_sales
WHERE mon in {0}
GROUP BY category, mon, books_sold
) AS s
WHERE s.num_months = {1}
AND s.mon in {0}
GROUP BY s.category
having round(avg(s.change_in_sales_from_last_month)) < 0
ORDER BY sale_trend ASC
""".format(seasontrend,len(seasontrend))
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0],'SaleTrend': result[1]}
l.append(d)
theresult_json = simplejson.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route('/sentiment_polarity/<category>/<month>', methods=['GET'])
def Sentiment_Polarity(category, month):
sr = Sales_Reviews(category, month)
return sr
def Sales_Reviews(category, month):
# AsterixDBConnection
class QueryResponse:
def __init__(self, raw_response):
self._json = loads(raw_response)
self.requestID = self._json['requestID'] if 'requestID' in self._json else None
self.clientContextID = self._json['clientContextID'] if 'clientContextID' in self._json else None
self.signature = self._json['signature'] if 'signature' in self._json else None
self.results = self._json['results'] if 'results' in self._json else None
self.metrics = self._json['metrics'] if 'metrics' in self._json else None
class AsterixConnection:
def __init__(self, server='http://45.79.91.219', port=19002):
self._server = server
self._port = port
self._url_base = self._server + ':' + str(port)
def query(self, statement, pretty=False, client_context_id=None):
endpoint = '/query/service'
url = self._url_base + endpoint
payload = {
'statement': statement,
'pretty': pretty
}
if client_context_id:
payload['client_context_id'] = client_context_id
data = parse.urlencode(payload).encode("utf-8")
req = request.Request(url, data)
response = request.urlopen(req).read()
return QueryResponse(response)
asterix_conn = AsterixConnection()
axquery = '''
use bookstore_dp;
SELECT * from ClassificationInfo where classification LIKE "%{0}%";'''.format(category)
response = asterix_conn.query(axquery)
df = pd.DataFrame(response.results)
node_id = []
for i in range(df.shape[0]):
a = df.ClassificationInfo[i]['nodeID']
node_id.append(a)
node_id = [str(x) for x in node_id]
node_id = set(node_id)
conn_string = "host='45.79.91.219' dbname='MyBookStore' user='postgres' password=''"
print ("Connecting to database\n ->%s" % (conn_string))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
print ("Connected!\n")
sql = "SELECT DISTINCT o.productid, o.billdate, o.numunits, p.asin,p.nodeid\
FROM orderlines o, products p\
WHERE o.productid=p.productid\
AND EXTRACT(month from billdate)={0};".format(month)
cursor.execute(sql)
# retrieve the records from the database
records = cursor.fetchall()
colnames = [desc[0] for desc in cursor.description]
df1 = pd.DataFrame(records, columns=colnames)
df2 = df1[df1['nodeid'].isin(node_id)]
df3 = df2.groupby(['billdate', 'productid', 'asin', 'nodeid'], as_index=False)['numunits'].sum()
my_asin = df3['asin']
my_asin = set(my_asin)
asin_str = ', '.join(my_asin)
def solrWrap(core, params):
query_string = 'http://45.79.91.219:8983/solr/' + core + '/select?' # connecting to our linode server
for key in params:
query_string = query_string + key + '=' + params[key] + '&'
# print (query_string)
solrcon = pysolr.Solr(query_string, timeout=10)
results = solrcon.search('*:*')
docs = pd.DataFrame(results.docs)
return docs
d3 = {'q': 'asin:(%s)' % asin_str, 'rows': '77165'}
d_res3 = solrWrap('bookstore', d3)
polarity_measure = []
for i in range(d_res3.shape[0]):
str1 = str(d_res3.reviewText[i])
blob = tb(str1)
polarity_measure.append(blob.sentiment.polarity)
se = pd.Series(polarity_measure)
d_res3['Sentiment_polarity'] = se.values
d_res3['asin'] = d_res3['asin_str'].apply(lambda x: '' + str(x)[2:-2] + '')
df_sentiment = d_res3.groupby(['asin'], as_index=False)['Sentiment_polarity'].mean()
result = pd.merge(df3, df_sentiment, on='asin', how='inner')
final_result = result.reset_index().to_json(orient='records')
return final_result
if __name__ == "__main__":
app.run(host='0.0.0.0',port=80)
| c = request.args[item]
print (c)
d[c] = result[c] | conditional_block |
Data_Exploration.py | from __future__ import print_function, division, unicode_literals
from datetime import date, datetime
import simplejson
from flask import Flask, request, jsonify
from www.archive.datasources import AsterixDataSource
from www.archive.datasources import SolrDataSource
from www.archive.datasources import convertToIn
from www.archive.middleware import VirtualIntegrationSchema
from www.archive.middleware import WebSession
#from sentiment_polarity import Sales_Reviews
import pandas as pd
from urllib import parse, request
import json
from json import loads
import psycopg2
from sqlalchemy import create_engine, text
import pysolr
from textblob import TextBlob as tb
app = Flask(__name__)
@app.route("/")
def Hello():
return "Hello World!"
@app.route('/api/service', methods=['POST'])
def api_service():
query = request.get_json(silent=True)
# needs to change to reading from xml file
xml = VirtualIntegrationSchema()
web_session = WebSession(xml)
return jsonify(web_session.get_result_sets(query))
def json_serial(obj):
|
@app.route("/api/web_method/<format>")
def api_web_method(format):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select *
from orderlines o, products p
where o.productid = p.productid
LIMIT 10
"""
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {}
for item in request.args:
c = request.args[item]
print (c)
d[c] = result[c]
l.append(d)
#
theresult_json = json.dumps(l, default=json_serial)
conn.close()
return theresult_json
@app.route("/api/correlation/<col1>/<col2>")
def Correlation(col1, col2):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select corr(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/covariance/<col1>/<col2>", methods=['GET'])
def Covariance(col1, col2):
"""Determine the covariance coefficient between two columns."""
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select covar_samp(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/histogram/<groupby>/<count>")
def Histogram(groupby, count):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT %s AS Group, count(%s) AS Count
FROM orders
Group by %s
Order by count(%s) DESC
""" % (groupby, count, groupby, count)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'Group': result[0], 'Count': result[1]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return theresult_json
@app.route('/api/add_message/<uuid>', methods=['GET', 'POST'])
def add_message(uuid):
content = request.get_json(silent=True)
print (content)
return jsonify('{"h" : "ok"}')
@app.route('/api/asterixwrap', methods=['GET'])
def api_asterixwrap():
sql = """USE AstxDB;
select * from TableBT v;
"""
ads = AsterixDataSource()
jsonobj = ads.execute(sql)
return jsonify(jsonobj)
def getNodeIds(category_list):
_where = ' OR '.join(['user.category.nested.nested.level_2 = "{0}"'.format(x) for x in category_list])
sql="""
use bookstore_dp;
select user.nodeID
from ClassificationInfo user
WHERE {0}
""".format(_where)
# where user.category.nested.nested.level_2 = "Education & Reference";
ads = AsterixDataSource(host="132.249.238.32")
jsonobj = ads.execute(sql)
return jsonobj
@app.route("/api/highest_monthly_sales_by_category/<list>")
def api_highest_monthly_sales_by_category(list):
category_list = list.split(",")
# print(category_list)
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
#engine = create_engine('postgresql+psycopg2://postgres@45.79.91.219/MyBookStore')
#
conn = engine.connect()
#
_jlist = getNodeIds(category_list) # will be replaced by asterix call once connected to DB - the result will not change though
print(_jlist)
_inStr = convertToIn(_jlist)
# print(_inStr)
#
sql = """
SELECT mon, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.productid, EXTRACT(MONTH from billdate)
order by p.productid
) monthlysales
WHERE category IN {0}
GROUP By mon
ORDER BY num_sold DESC
""".format(_inStr)
#
print(sql)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'mon': int(result[0]), 'num_sold' : int(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return (theresult_json)
@app.route('/api/solrwrap', methods=['GET'])
def api_solrwrap():
q = "*:*"
ads = SolrDataSource()
jsonobj = ads.execute(q)
return jsonify(jsonobj)
@app.route("/api/Top_Categories/<num_categories>/<months>")
def Top_categories(num_categories, months):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthlysales
WHERE mon in ({0})
GROUP BY category
ORDER BY num_sold DESC
LIMIT ({1})
""".format(months,num_categories)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0], 'num_sold': float(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Discontinue_Stocking/<threshold>/<startyear>/<endyear>")
def Discontinue_Stocking(threshold, startyear, endyear):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category
FROM (
select EXTRACT(YEAR from o.billdate) as yr,
p.nodeid as category, sum(o.numunits) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid , EXTRACT(YEAR from o.billdate)
order by p.nodeid
) yearly_sales
where books_sold < {0} AND (yr < {2} AND yr >= {1})
""".format(threshold,startyear, endyear)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Downward_Sales/<season>")
def Downware_Sales(season):
# return jsonify({ "n" : num_categories, "m" : months})
seasons = {'spring':(3,4,5),
'summer':(6,7,8),
'fall':(9,10,11),
'winter':(12,1,2)
}
seasontrend = seasons.get((season.lower()))
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT s.category, round(avg(s.change_in_sales_from_last_month)) AS sale_trend
FROM
(
SELECT category, mon, count(mon) OVER (PARTITION BY category) as num_months,
books_sold - lag(books_sold,1) over (PARTITION BY category ORDER BY mon) as change_in_sales_from_last_month
FROM( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthly_sales
WHERE mon in {0}
GROUP BY category, mon, books_sold
) AS s
WHERE s.num_months = {1}
AND s.mon in {0}
GROUP BY s.category
having round(avg(s.change_in_sales_from_last_month)) < 0
ORDER BY sale_trend ASC
""".format(seasontrend,len(seasontrend))
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0],'SaleTrend': result[1]}
l.append(d)
theresult_json = simplejson.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route('/sentiment_polarity/<category>/<month>', methods=['GET'])
def Sentiment_Polarity(category, month):
sr = Sales_Reviews(category, month)
return sr
def Sales_Reviews(category, month):
# AsterixDBConnection
class QueryResponse:
def __init__(self, raw_response):
self._json = loads(raw_response)
self.requestID = self._json['requestID'] if 'requestID' in self._json else None
self.clientContextID = self._json['clientContextID'] if 'clientContextID' in self._json else None
self.signature = self._json['signature'] if 'signature' in self._json else None
self.results = self._json['results'] if 'results' in self._json else None
self.metrics = self._json['metrics'] if 'metrics' in self._json else None
class AsterixConnection:
def __init__(self, server='http://45.79.91.219', port=19002):
self._server = server
self._port = port
self._url_base = self._server + ':' + str(port)
def query(self, statement, pretty=False, client_context_id=None):
endpoint = '/query/service'
url = self._url_base + endpoint
payload = {
'statement': statement,
'pretty': pretty
}
if client_context_id:
payload['client_context_id'] = client_context_id
data = parse.urlencode(payload).encode("utf-8")
req = request.Request(url, data)
response = request.urlopen(req).read()
return QueryResponse(response)
asterix_conn = AsterixConnection()
axquery = '''
use bookstore_dp;
SELECT * from ClassificationInfo where classification LIKE "%{0}%";'''.format(category)
response = asterix_conn.query(axquery)
df = pd.DataFrame(response.results)
node_id = []
for i in range(df.shape[0]):
a = df.ClassificationInfo[i]['nodeID']
node_id.append(a)
node_id = [str(x) for x in node_id]
node_id = set(node_id)
conn_string = "host='45.79.91.219' dbname='MyBookStore' user='postgres' password=''"
print ("Connecting to database\n ->%s" % (conn_string))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
print ("Connected!\n")
sql = "SELECT DISTINCT o.productid, o.billdate, o.numunits, p.asin,p.nodeid\
FROM orderlines o, products p\
WHERE o.productid=p.productid\
AND EXTRACT(month from billdate)={0};".format(month)
cursor.execute(sql)
# retrieve the records from the database
records = cursor.fetchall()
colnames = [desc[0] for desc in cursor.description]
df1 = pd.DataFrame(records, columns=colnames)
df2 = df1[df1['nodeid'].isin(node_id)]
df3 = df2.groupby(['billdate', 'productid', 'asin', 'nodeid'], as_index=False)['numunits'].sum()
my_asin = df3['asin']
my_asin = set(my_asin)
asin_str = ', '.join(my_asin)
def solrWrap(core, params):
query_string = 'http://45.79.91.219:8983/solr/' + core + '/select?' # connecting to our linode server
for key in params:
query_string = query_string + key + '=' + params[key] + '&'
# print (query_string)
solrcon = pysolr.Solr(query_string, timeout=10)
results = solrcon.search('*:*')
docs = pd.DataFrame(results.docs)
return docs
d3 = {'q': 'asin:(%s)' % asin_str, 'rows': '77165'}
d_res3 = solrWrap('bookstore', d3)
polarity_measure = []
for i in range(d_res3.shape[0]):
str1 = str(d_res3.reviewText[i])
blob = tb(str1)
polarity_measure.append(blob.sentiment.polarity)
se = pd.Series(polarity_measure)
d_res3['Sentiment_polarity'] = se.values
d_res3['asin'] = d_res3['asin_str'].apply(lambda x: '' + str(x)[2:-2] + '')
df_sentiment = d_res3.groupby(['asin'], as_index=False)['Sentiment_polarity'].mean()
result = pd.merge(df3, df_sentiment, on='asin', how='inner')
final_result = result.reset_index().to_json(orient='records')
return final_result
if __name__ == "__main__":
app.run(host='0.0.0.0',port=80)
| """JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj)) | identifier_body |
Data_Exploration.py | from __future__ import print_function, division, unicode_literals
from datetime import date, datetime
import simplejson
from flask import Flask, request, jsonify
from www.archive.datasources import AsterixDataSource
from www.archive.datasources import SolrDataSource
from www.archive.datasources import convertToIn
from www.archive.middleware import VirtualIntegrationSchema
from www.archive.middleware import WebSession
#from sentiment_polarity import Sales_Reviews
import pandas as pd
from urllib import parse, request
import json
from json import loads
import psycopg2
from sqlalchemy import create_engine, text
import pysolr
from textblob import TextBlob as tb
app = Flask(__name__)
@app.route("/")
def Hello():
return "Hello World!"
@app.route('/api/service', methods=['POST'])
def api_service():
query = request.get_json(silent=True)
# needs to change to reading from xml file
xml = VirtualIntegrationSchema()
web_session = WebSession(xml)
return jsonify(web_session.get_result_sets(query))
def | (obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj))
@app.route("/api/web_method/<format>")
def api_web_method(format):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select *
from orderlines o, products p
where o.productid = p.productid
LIMIT 10
"""
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {}
for item in request.args:
c = request.args[item]
print (c)
d[c] = result[c]
l.append(d)
#
theresult_json = json.dumps(l, default=json_serial)
conn.close()
return theresult_json
@app.route("/api/correlation/<col1>/<col2>")
def Correlation(col1, col2):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select corr(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/covariance/<col1>/<col2>", methods=['GET'])
def Covariance(col1, col2):
"""Determine the covariance coefficient between two columns."""
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select covar_samp(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/histogram/<groupby>/<count>")
def Histogram(groupby, count):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT %s AS Group, count(%s) AS Count
FROM orders
Group by %s
Order by count(%s) DESC
""" % (groupby, count, groupby, count)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'Group': result[0], 'Count': result[1]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return theresult_json
@app.route('/api/add_message/<uuid>', methods=['GET', 'POST'])
def add_message(uuid):
content = request.get_json(silent=True)
print (content)
return jsonify('{"h" : "ok"}')
@app.route('/api/asterixwrap', methods=['GET'])
def api_asterixwrap():
sql = """USE AstxDB;
select * from TableBT v;
"""
ads = AsterixDataSource()
jsonobj = ads.execute(sql)
return jsonify(jsonobj)
def getNodeIds(category_list):
_where = ' OR '.join(['user.category.nested.nested.level_2 = "{0}"'.format(x) for x in category_list])
sql="""
use bookstore_dp;
select user.nodeID
from ClassificationInfo user
WHERE {0}
""".format(_where)
# where user.category.nested.nested.level_2 = "Education & Reference";
ads = AsterixDataSource(host="132.249.238.32")
jsonobj = ads.execute(sql)
return jsonobj
@app.route("/api/highest_monthly_sales_by_category/<list>")
def api_highest_monthly_sales_by_category(list):
category_list = list.split(",")
# print(category_list)
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
#engine = create_engine('postgresql+psycopg2://postgres@45.79.91.219/MyBookStore')
#
conn = engine.connect()
#
_jlist = getNodeIds(category_list) # will be replaced by asterix call once connected to DB - the result will not change though
print(_jlist)
_inStr = convertToIn(_jlist)
# print(_inStr)
#
sql = """
SELECT mon, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.productid, EXTRACT(MONTH from billdate)
order by p.productid
) monthlysales
WHERE category IN {0}
GROUP By mon
ORDER BY num_sold DESC
""".format(_inStr)
#
print(sql)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'mon': int(result[0]), 'num_sold' : int(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return (theresult_json)
@app.route('/api/solrwrap', methods=['GET'])
def api_solrwrap():
q = "*:*"
ads = SolrDataSource()
jsonobj = ads.execute(q)
return jsonify(jsonobj)
@app.route("/api/Top_Categories/<num_categories>/<months>")
def Top_categories(num_categories, months):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthlysales
WHERE mon in ({0})
GROUP BY category
ORDER BY num_sold DESC
LIMIT ({1})
""".format(months,num_categories)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0], 'num_sold': float(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Discontinue_Stocking/<threshold>/<startyear>/<endyear>")
def Discontinue_Stocking(threshold, startyear, endyear):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category
FROM (
select EXTRACT(YEAR from o.billdate) as yr,
p.nodeid as category, sum(o.numunits) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid , EXTRACT(YEAR from o.billdate)
order by p.nodeid
) yearly_sales
where books_sold < {0} AND (yr < {2} AND yr >= {1})
""".format(threshold,startyear, endyear)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Downward_Sales/<season>")
def Downware_Sales(season):
# return jsonify({ "n" : num_categories, "m" : months})
seasons = {'spring':(3,4,5),
'summer':(6,7,8),
'fall':(9,10,11),
'winter':(12,1,2)
}
seasontrend = seasons.get((season.lower()))
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT s.category, round(avg(s.change_in_sales_from_last_month)) AS sale_trend
FROM
(
SELECT category, mon, count(mon) OVER (PARTITION BY category) as num_months,
books_sold - lag(books_sold,1) over (PARTITION BY category ORDER BY mon) as change_in_sales_from_last_month
FROM( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthly_sales
WHERE mon in {0}
GROUP BY category, mon, books_sold
) AS s
WHERE s.num_months = {1}
AND s.mon in {0}
GROUP BY s.category
having round(avg(s.change_in_sales_from_last_month)) < 0
ORDER BY sale_trend ASC
""".format(seasontrend,len(seasontrend))
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0],'SaleTrend': result[1]}
l.append(d)
theresult_json = simplejson.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route('/sentiment_polarity/<category>/<month>', methods=['GET'])
def Sentiment_Polarity(category, month):
sr = Sales_Reviews(category, month)
return sr
def Sales_Reviews(category, month):
# AsterixDBConnection
class QueryResponse:
def __init__(self, raw_response):
self._json = loads(raw_response)
self.requestID = self._json['requestID'] if 'requestID' in self._json else None
self.clientContextID = self._json['clientContextID'] if 'clientContextID' in self._json else None
self.signature = self._json['signature'] if 'signature' in self._json else None
self.results = self._json['results'] if 'results' in self._json else None
self.metrics = self._json['metrics'] if 'metrics' in self._json else None
class AsterixConnection:
def __init__(self, server='http://45.79.91.219', port=19002):
self._server = server
self._port = port
self._url_base = self._server + ':' + str(port)
def query(self, statement, pretty=False, client_context_id=None):
endpoint = '/query/service'
url = self._url_base + endpoint
payload = {
'statement': statement,
'pretty': pretty
}
if client_context_id:
payload['client_context_id'] = client_context_id
data = parse.urlencode(payload).encode("utf-8")
req = request.Request(url, data)
response = request.urlopen(req).read()
return QueryResponse(response)
asterix_conn = AsterixConnection()
axquery = '''
use bookstore_dp;
SELECT * from ClassificationInfo where classification LIKE "%{0}%";'''.format(category)
response = asterix_conn.query(axquery)
df = pd.DataFrame(response.results)
node_id = []
for i in range(df.shape[0]):
a = df.ClassificationInfo[i]['nodeID']
node_id.append(a)
node_id = [str(x) for x in node_id]
node_id = set(node_id)
conn_string = "host='45.79.91.219' dbname='MyBookStore' user='postgres' password=''"
print ("Connecting to database\n ->%s" % (conn_string))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
print ("Connected!\n")
sql = "SELECT DISTINCT o.productid, o.billdate, o.numunits, p.asin,p.nodeid\
FROM orderlines o, products p\
WHERE o.productid=p.productid\
AND EXTRACT(month from billdate)={0};".format(month)
cursor.execute(sql)
# retrieve the records from the database
records = cursor.fetchall()
colnames = [desc[0] for desc in cursor.description]
df1 = pd.DataFrame(records, columns=colnames)
df2 = df1[df1['nodeid'].isin(node_id)]
df3 = df2.groupby(['billdate', 'productid', 'asin', 'nodeid'], as_index=False)['numunits'].sum()
my_asin = df3['asin']
my_asin = set(my_asin)
asin_str = ', '.join(my_asin)
def solrWrap(core, params):
query_string = 'http://45.79.91.219:8983/solr/' + core + '/select?' # connecting to our linode server
for key in params:
query_string = query_string + key + '=' + params[key] + '&'
# print (query_string)
solrcon = pysolr.Solr(query_string, timeout=10)
results = solrcon.search('*:*')
docs = pd.DataFrame(results.docs)
return docs
d3 = {'q': 'asin:(%s)' % asin_str, 'rows': '77165'}
d_res3 = solrWrap('bookstore', d3)
polarity_measure = []
for i in range(d_res3.shape[0]):
str1 = str(d_res3.reviewText[i])
blob = tb(str1)
polarity_measure.append(blob.sentiment.polarity)
se = pd.Series(polarity_measure)
d_res3['Sentiment_polarity'] = se.values
d_res3['asin'] = d_res3['asin_str'].apply(lambda x: '' + str(x)[2:-2] + '')
df_sentiment = d_res3.groupby(['asin'], as_index=False)['Sentiment_polarity'].mean()
result = pd.merge(df3, df_sentiment, on='asin', how='inner')
final_result = result.reset_index().to_json(orient='records')
return final_result
if __name__ == "__main__":
app.run(host='0.0.0.0',port=80)
| json_serial | identifier_name |
Data_Exploration.py | from __future__ import print_function, division, unicode_literals
from datetime import date, datetime
import simplejson
from flask import Flask, request, jsonify
from www.archive.datasources import AsterixDataSource
from www.archive.datasources import SolrDataSource
from www.archive.datasources import convertToIn
from www.archive.middleware import VirtualIntegrationSchema
from www.archive.middleware import WebSession
#from sentiment_polarity import Sales_Reviews
import pandas as pd
from urllib import parse, request
import json |
app = Flask(__name__)
@app.route("/")
def Hello():
return "Hello World!"
@app.route('/api/service', methods=['POST'])
def api_service():
query = request.get_json(silent=True)
# needs to change to reading from xml file
xml = VirtualIntegrationSchema()
web_session = WebSession(xml)
return jsonify(web_session.get_result_sets(query))
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj))
@app.route("/api/web_method/<format>")
def api_web_method(format):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select *
from orderlines o, products p
where o.productid = p.productid
LIMIT 10
"""
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {}
for item in request.args:
c = request.args[item]
print (c)
d[c] = result[c]
l.append(d)
#
theresult_json = json.dumps(l, default=json_serial)
conn.close()
return theresult_json
@app.route("/api/correlation/<col1>/<col2>")
def Correlation(col1, col2):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select corr(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/covariance/<col1>/<col2>", methods=['GET'])
def Covariance(col1, col2):
"""Determine the covariance coefficient between two columns."""
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
select covar_samp(%s::int, %s::int)
from orderlines o, products p
where o.productid = p.productid
""" %(col1, col2)
stmt = text(sql)
result = conn.execute(stmt)
for row in result:
print(row[0])
conn.close()
return str(row[0])
@app.route("/api/histogram/<groupby>/<count>")
def Histogram(groupby, count):
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT %s AS Group, count(%s) AS Count
FROM orders
Group by %s
Order by count(%s) DESC
""" % (groupby, count, groupby, count)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'Group': result[0], 'Count': result[1]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return theresult_json
@app.route('/api/add_message/<uuid>', methods=['GET', 'POST'])
def add_message(uuid):
content = request.get_json(silent=True)
print (content)
return jsonify('{"h" : "ok"}')
@app.route('/api/asterixwrap', methods=['GET'])
def api_asterixwrap():
sql = """USE AstxDB;
select * from TableBT v;
"""
ads = AsterixDataSource()
jsonobj = ads.execute(sql)
return jsonify(jsonobj)
def getNodeIds(category_list):
_where = ' OR '.join(['user.category.nested.nested.level_2 = "{0}"'.format(x) for x in category_list])
sql="""
use bookstore_dp;
select user.nodeID
from ClassificationInfo user
WHERE {0}
""".format(_where)
# where user.category.nested.nested.level_2 = "Education & Reference";
ads = AsterixDataSource(host="132.249.238.32")
jsonobj = ads.execute(sql)
return jsonobj
@app.route("/api/highest_monthly_sales_by_category/<list>")
def api_highest_monthly_sales_by_category(list):
category_list = list.split(",")
# print(category_list)
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
#engine = create_engine('postgresql+psycopg2://postgres@45.79.91.219/MyBookStore')
#
conn = engine.connect()
#
_jlist = getNodeIds(category_list) # will be replaced by asterix call once connected to DB - the result will not change though
print(_jlist)
_inStr = convertToIn(_jlist)
# print(_inStr)
#
sql = """
SELECT mon, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.productid, EXTRACT(MONTH from billdate)
order by p.productid
) monthlysales
WHERE category IN {0}
GROUP By mon
ORDER BY num_sold DESC
""".format(_inStr)
#
print(sql)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'mon': int(result[0]), 'num_sold' : int(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
return (theresult_json)
@app.route('/api/solrwrap', methods=['GET'])
def api_solrwrap():
q = "*:*"
ads = SolrDataSource()
jsonobj = ads.execute(q)
return jsonify(jsonobj)
@app.route("/api/Top_Categories/<num_categories>/<months>")
def Top_categories(num_categories, months):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category, sum(books_sold) AS num_sold
FROM
( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthlysales
WHERE mon in ({0})
GROUP BY category
ORDER BY num_sold DESC
LIMIT ({1})
""".format(months,num_categories)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0], 'num_sold': float(result[1])}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Discontinue_Stocking/<threshold>/<startyear>/<endyear>")
def Discontinue_Stocking(threshold, startyear, endyear):
# return jsonify({ "n" : num_categories, "m" : months})
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT category
FROM (
select EXTRACT(YEAR from o.billdate) as yr,
p.nodeid as category, sum(o.numunits) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid , EXTRACT(YEAR from o.billdate)
order by p.nodeid
) yearly_sales
where books_sold < {0} AND (yr < {2} AND yr >= {1})
""".format(threshold,startyear, endyear)
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0]}
l.append(d)
theresult_json = json.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route("/api/Downward_Sales/<season>")
def Downware_Sales(season):
# return jsonify({ "n" : num_categories, "m" : months})
seasons = {'spring':(3,4,5),
'summer':(6,7,8),
'fall':(9,10,11),
'winter':(12,1,2)
}
seasontrend = seasons.get((season.lower()))
engine = create_engine('postgresql+psycopg2://student:123456@132.249.238.27:5432/bookstore_dp')
conn = engine.connect()
sql = """
SELECT s.category, round(avg(s.change_in_sales_from_last_month)) AS sale_trend
FROM
(
SELECT category, mon, count(mon) OVER (PARTITION BY category) as num_months,
books_sold - lag(books_sold,1) over (PARTITION BY category ORDER BY mon) as change_in_sales_from_last_month
FROM( select EXTRACT(MONTH from o.billdate) as mon, p.nodeid as category, count(o.orderid) as books_sold
from orderlines as o, products as p
where o.productid = p.productid AND o.totalprice > 0::money
group by p.nodeid, EXTRACT(MONTH from billdate)
order by p.nodeid
) monthly_sales
WHERE mon in {0}
GROUP BY category, mon, books_sold
) AS s
WHERE s.num_months = {1}
AND s.mon in {0}
GROUP BY s.category
having round(avg(s.change_in_sales_from_last_month)) < 0
ORDER BY sale_trend ASC
""".format(seasontrend,len(seasontrend))
stmt = text(sql)
results = conn.execute(stmt)
l = []
for result in results:
d = {'category': result[0],'SaleTrend': result[1]}
l.append(d)
theresult_json = simplejson.dumps(l)
conn.close()
# return (results)
# print(theresult_json)
return (theresult_json)
@app.route('/sentiment_polarity/<category>/<month>', methods=['GET'])
def Sentiment_Polarity(category, month):
sr = Sales_Reviews(category, month)
return sr
def Sales_Reviews(category, month):
# AsterixDBConnection
class QueryResponse:
def __init__(self, raw_response):
self._json = loads(raw_response)
self.requestID = self._json['requestID'] if 'requestID' in self._json else None
self.clientContextID = self._json['clientContextID'] if 'clientContextID' in self._json else None
self.signature = self._json['signature'] if 'signature' in self._json else None
self.results = self._json['results'] if 'results' in self._json else None
self.metrics = self._json['metrics'] if 'metrics' in self._json else None
class AsterixConnection:
def __init__(self, server='http://45.79.91.219', port=19002):
self._server = server
self._port = port
self._url_base = self._server + ':' + str(port)
def query(self, statement, pretty=False, client_context_id=None):
endpoint = '/query/service'
url = self._url_base + endpoint
payload = {
'statement': statement,
'pretty': pretty
}
if client_context_id:
payload['client_context_id'] = client_context_id
data = parse.urlencode(payload).encode("utf-8")
req = request.Request(url, data)
response = request.urlopen(req).read()
return QueryResponse(response)
asterix_conn = AsterixConnection()
axquery = '''
use bookstore_dp;
SELECT * from ClassificationInfo where classification LIKE "%{0}%";'''.format(category)
response = asterix_conn.query(axquery)
df = pd.DataFrame(response.results)
node_id = []
for i in range(df.shape[0]):
a = df.ClassificationInfo[i]['nodeID']
node_id.append(a)
node_id = [str(x) for x in node_id]
node_id = set(node_id)
conn_string = "host='45.79.91.219' dbname='MyBookStore' user='postgres' password=''"
print ("Connecting to database\n ->%s" % (conn_string))
conn = psycopg2.connect(conn_string)
cursor = conn.cursor()
print ("Connected!\n")
sql = "SELECT DISTINCT o.productid, o.billdate, o.numunits, p.asin,p.nodeid\
FROM orderlines o, products p\
WHERE o.productid=p.productid\
AND EXTRACT(month from billdate)={0};".format(month)
cursor.execute(sql)
# retrieve the records from the database
records = cursor.fetchall()
colnames = [desc[0] for desc in cursor.description]
df1 = pd.DataFrame(records, columns=colnames)
df2 = df1[df1['nodeid'].isin(node_id)]
df3 = df2.groupby(['billdate', 'productid', 'asin', 'nodeid'], as_index=False)['numunits'].sum()
my_asin = df3['asin']
my_asin = set(my_asin)
asin_str = ', '.join(my_asin)
def solrWrap(core, params):
query_string = 'http://45.79.91.219:8983/solr/' + core + '/select?' # connecting to our linode server
for key in params:
query_string = query_string + key + '=' + params[key] + '&'
# print (query_string)
solrcon = pysolr.Solr(query_string, timeout=10)
results = solrcon.search('*:*')
docs = pd.DataFrame(results.docs)
return docs
d3 = {'q': 'asin:(%s)' % asin_str, 'rows': '77165'}
d_res3 = solrWrap('bookstore', d3)
polarity_measure = []
for i in range(d_res3.shape[0]):
str1 = str(d_res3.reviewText[i])
blob = tb(str1)
polarity_measure.append(blob.sentiment.polarity)
se = pd.Series(polarity_measure)
d_res3['Sentiment_polarity'] = se.values
d_res3['asin'] = d_res3['asin_str'].apply(lambda x: '' + str(x)[2:-2] + '')
df_sentiment = d_res3.groupby(['asin'], as_index=False)['Sentiment_polarity'].mean()
result = pd.merge(df3, df_sentiment, on='asin', how='inner')
final_result = result.reset_index().to_json(orient='records')
return final_result
if __name__ == "__main__":
app.run(host='0.0.0.0',port=80) | from json import loads
import psycopg2
from sqlalchemy import create_engine, text
import pysolr
from textblob import TextBlob as tb | random_line_split |
ConfiguredResourceUploader.js | /*
* web: ConfiguredResourceUploader.js
* XNAT http://www.xnat.org
* Copyright (c) 2005-2017, Washington University School of Medicine and Howard Hughes Medical Institute
* All Rights Reserved
*
* Released under the Simplified BSD.
*/
/*
* resource dialog is used to upload resources at any level
*/
if(XNAT.app.crConfigs==undefined){
XNAT.app.crConfigs={
load:function(){
YAHOO.util.Connect.asyncRequest('GET', serverRoot+'/data/projects/' + XNAT.app.crConfigs.project +'/config/resource_config/script?format=json', {success : XNAT.app.crConfigs.handleLoad, failure : function(){}, cache : false, scope : XNAT.app.crConfigs});
},
handleLoad:function(obj){
var parsedResponse = YAHOO.lang.JSON.parse(obj.responseText);
var script = "";
if (parsedResponse.ResultSet.Result.length !== 0) {
script = parsedResponse.ResultSet.Result[0].script;
//sort of a hack to get this code to work with generic nrg_config return values
if(script == undefined ){
script = parsedResponse.ResultSet.Result[0].contents;
}
this.configs=YAHOO.lang.JSON.parse(script);
this.showLinks();
}
},
showLinks:function(){
$("a.uploadLink").each(function(value){
var type=$(this).attr('data-type');
var tempConfigs=new Array();
var props=$(this).attr('data-props');
| if(tempConfigs.length>0){
if(value.dontHide){
$(value).color(value.defaultColor);
$(value).css('cursor:pointer');
}
$(this).click(function(){
XNAT.app.crUploader.show(this);
return false;
});
$(this).show();
}else{
if(!value.dontHide){
$(this).hide();
}
}
});
},
getConfigsByType:function(type){
var temp=new Array();
jq.each(this.configs,function(i1,v1){
if(v1.type==type){
temp.push(v1);
}
});
return temp;
},
getAllConfigsByType:function(type, props){
var tmpConfigs=this.getConfigsByType(type);
var typeInfo=window.available_elements.getByName(type);
if(typeInfo!=undefined){
if(typeInfo.isSubjectAssessor){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:subjectAssessorData"));
}
if(typeInfo.isImageAssessor){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageAssessorData"));
}
if(typeInfo.isImageSession){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageSessionData"));
}
if(typeInfo.isImageScan){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageScanData"));
}
}
var tempConfigs2=new Array();
//allow filtering of links
jq.each(tmpConfigs,function(i1,v1){
if(props!=undefined && props!=null && v1.filter){
var filters=v1.filter.split(",");
var matched=false;
jq.each(filters,function (i2,v2){
if(!matched){
if((v2.trim()==props.trim())){
matched=true;
}
}
});
if(matched){
tempConfigs2.push(v1);
}
}else{
tempConfigs2.push(v1);
}
});
return tempConfigs2;
}
}
YAHOO.util.Event.onDOMReady(XNAT.app.crConfigs.load);
XNAT.app.crUploader={
ready:false,
show:function(config){
this.project=XNAT.app.crConfigs.project;
this.type=$(config).attr('data-type');
this.uri=$(config).attr('data-uri');
this.props=$(config).attr('data-props');
this.updateForm();
XNAT.app.crUploader.dialog.render(document.body);
XNAT.app.crUploader.dialog.show();
},
updateForm:function(obj){
var configs=XNAT.app.crConfigs.getAllConfigsByType(this.type,this.props);
$('#cruSel').html("");
$('#cruSel').append($("<option value=''>SELECT</option>"));
//render select options
$.each(configs,function(index,value){
$('#cruSel').append($("<option value='" + value.name +"' data-message='" + ((value.description)?value.description:"") +"' data-level='" + ((value.level)?value.level:"") +"' data-overwrite='" + ((value.overwrite)?value.overwrite:"") +"' data-label='" + value.label +"' data-subdir='" + value.subdir +"'>" + value.name +"</option>"));
});
if(this.registered==undefined){
this.registered=true;
//add onchange event
$('#cruSel').change(function(){
if($(this).val()!=""){
var desc=$("#cruSel option:selected").attr("data-message");
if(desc!=null && desc!=undefined){
$('#cruMsg').html(desc);
}else{
$('#cruMsg').html("");
}
}else{
$('#cruMsg').html("");
}
});
}
},
doUpload:function(allowOverwrite){
//executes the selected upload operation
var frm=document.getElementById("cru_upload_frm");
if($('#cruSel').val()==""){
showMessage("page_body","Select resource","Please select a resource to upload.");
return;
}
if(requireReason && frm.event_reason.value==""){
showMessage("page_body","Include justification.","Please include a justification for this upload.");
return;
}
if(frm.upload_file.value==""){
showMessage("page_body","Select file.","Please use the file selector to choose a file to upload.");
return;
}
YAHOO.util.Connect.setForm(frm,true);
var callback={
upload:function(obj1){
closeModalPanel("cru_upl_mdl");
this.handleUpload(obj1);
},
scope:this
}
var selector=$("#cruSel option:selected");
this.overwrite=selector.attr("data-overwrite");
this.level=selector.attr("data-level");
var params="";
params+="&event_type=WEB_FORM";
params+="&event_action=Uploaded "+ $(selector).text();
params+="&extract=true";
if(showReason && frm.event_reason.value!=""){
params+="&event_reason="+frm.event_reason.value;
}else{
params+="&event_reason=standard upload";
}
openModalPanel("cru_upl_mdl","Uploading Files");
var subdir=$(selector).attr('data-subdir');
if(subdir!=null && subdir!=undefined){
if(!subdir.startsWith("/")){
subdir="/"+subdir;
}
if(!subdir.endsWith("/")){
subdir=subdir+"/";
}
}else{
subdir="";
}
var filepath=frm.upload_file.value;
while(filepath.indexOf("\\")>-1){
filepath=filepath.substring(filepath.indexOf("\\")+1);
}
filepath=subdir+filepath;
if(allowOverwrite){
params+="&overwrite=true";
}
var lvl="";
if(this.level!='default' && this.level!='' && this.level!=undefined){
lvl="/" + this.level;
}
YAHOO.util.Connect.asyncRequest('POST',this.uri + lvl +"/resources/"+ $(selector).attr('data-label') +"/files"+filepath+"?XNAT_CSRF=" + csrfToken +params,callback);
},
handleUpload:function(response,o2,o3){
//handles the response form the upload operation
//because this is a file upload, both successes and failures will use this method
if(response.responseText==undefined || response.responseText=="" || response.responseText.match(/^<pre.*?><\/pre>$/)){
showMessage("page_body","Upload successful.","Your files have been successfully uploaded.");
document.getElementById("cru_upload_frm").upload_file.value="";
if(window.viewer!=undefined && window.viewer.loading>0){
window.viewer.refreshCatalogs();
}
}else if(response.responseText!=undefined && (response.responseText.indexOf("File already exists")>-1 || response.responseText.indexOf("duplicates")>-1)){
if(this.overwrite){
this.confirm("Duplicate files", "The uploaded files already exist on the server. Would you like to overwrite those files?",function(){
//yes
this.hide();
XNAT.app.crUploader.doUpload(true);
},function(){
//no
this.hide();
XNAT.app.crUploader.dialog.hide();
});
}else{
showMessage("page_body","Failed upload.","The selected files already exist for this session.");
}
}else if(response.responseText==undefined){
//this block is for IE. For some reason the responseText is coming through as undefined.
showMessage("page_body","Failed upload.","Unable to upload selected files.");
}else{
showMessage("page_body","Failed upload.",response.responseText);
}
this.dialog.hide();
},
confirm : function (header, msg, handleYes, handleNo) {
var dialog = new YAHOO.widget.SimpleDialog('widget_confirm', {
visible:false,
width: '20em',
zIndex: 9998,
close: false,
fixedcenter: true,
modal: true,
draggable: true,
constraintoviewport: true,
icon: YAHOO.widget.SimpleDialog.ICON_WARN,
buttons: [
{ text: 'Yes', handler: handleYes},
{ text: 'No', handler: handleNo, isDefault: true }
]
});
dialog.setHeader(header);
dialog.setBody(msg);
dialog.cfg.queueProperty('icon', YAHOO.widget.SimpleDialog.ICON_HELP);
dialog.cfg.queueProperty('zIndex', 9998);
dialog.render(document.body);
dialog.show();
return dialog;
}
}
var tmpUploadFrm='<div id="cru_dialog" style="visibility:hidden">';
tmpUploadFrm+=' <div class="hd">Upload Files</div>';
tmpUploadFrm+=' <div class="bd" style="">';
tmpUploadFrm+=' <div class="cru_a">';
tmpUploadFrm+=' <form id="cru_upload_frm">';
tmpUploadFrm+=' <div>Select destination directory for uploaded files:</div>';
tmpUploadFrm+=' <div><select id="cruSel"><option value="">SELECT</option></select></div>';
tmpUploadFrm+=' <div id="cruMsg" style="padding:4px;"></div>';
tmpUploadFrm+=' <div style="margin-top:12px;margin-bottom:16px;">Local File: <input type="file" id="cru_upload_file" name="upload_file"/></div>';
if(showReason){
tmpUploadFrm+=' <div style="margin-bottom:16px;">Justification:<br><textarea id="cru_event_reason" name="event_reason" cols="50" rows="3"></textarea></div>';
}
tmpUploadFrm+=' </form>';
tmpUploadFrm+=' </div>';
tmpUploadFrm+=' </div> ';
tmpUploadFrm+='</div> ';
$("body").append(tmpUploadFrm);
//initialize modal upload dialog
XNAT.app.crUploader.dialog=new YAHOO.widget.Dialog("cru_dialog", { fixedcenter:true, visible:false, width:"500px", height:"300px", modal:true, close:true, draggable:true } ),
XNAT.app.crUploader.dialog.cfg.queueProperty("buttons", [{ text:"Cancel", handler:{fn:function(){XNAT.app.crUploader.dialog.hide();}}},{ text:"Upload", handler:{fn:function(){XNAT.app.crUploader.doUpload();}}, isDefault:true}]);
} |
var tempConfigs=XNAT.app.crConfigs.getAllConfigsByType(type,props)
| random_line_split |
ConfiguredResourceUploader.js | /*
* web: ConfiguredResourceUploader.js
* XNAT http://www.xnat.org
* Copyright (c) 2005-2017, Washington University School of Medicine and Howard Hughes Medical Institute
* All Rights Reserved
*
* Released under the Simplified BSD.
*/
/*
* resource dialog is used to upload resources at any level
*/
if(XNAT.app.crConfigs==undefined){
XNAT.app.crConfigs={
load:function(){
YAHOO.util.Connect.asyncRequest('GET', serverRoot+'/data/projects/' + XNAT.app.crConfigs.project +'/config/resource_config/script?format=json', {success : XNAT.app.crConfigs.handleLoad, failure : function(){}, cache : false, scope : XNAT.app.crConfigs});
},
handleLoad:function(obj){
var parsedResponse = YAHOO.lang.JSON.parse(obj.responseText);
var script = "";
if (parsedResponse.ResultSet.Result.length !== 0) {
script = parsedResponse.ResultSet.Result[0].script;
//sort of a hack to get this code to work with generic nrg_config return values
if(script == undefined ){
script = parsedResponse.ResultSet.Result[0].contents;
}
this.configs=YAHOO.lang.JSON.parse(script);
this.showLinks();
}
},
showLinks:function(){
$("a.uploadLink").each(function(value){
var type=$(this).attr('data-type');
var tempConfigs=new Array();
var props=$(this).attr('data-props');
var tempConfigs=XNAT.app.crConfigs.getAllConfigsByType(type,props)
if(tempConfigs.length>0){
if(value.dontHide){
$(value).color(value.defaultColor);
$(value).css('cursor:pointer');
}
$(this).click(function(){
XNAT.app.crUploader.show(this);
return false;
});
$(this).show();
}else{
if(!value.dontHide){
$(this).hide();
}
}
});
},
getConfigsByType:function(type){
var temp=new Array();
jq.each(this.configs,function(i1,v1){
if(v1.type==type){
temp.push(v1);
}
});
return temp;
},
getAllConfigsByType:function(type, props){
var tmpConfigs=this.getConfigsByType(type);
var typeInfo=window.available_elements.getByName(type);
if(typeInfo!=undefined){
if(typeInfo.isSubjectAssessor){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:subjectAssessorData"));
}
if(typeInfo.isImageAssessor){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageAssessorData"));
}
if(typeInfo.isImageSession){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageSessionData"));
}
if(typeInfo.isImageScan){
tmpConfigs=tmpConfigs.concat(this.getConfigsByType("xnat:imageScanData"));
}
}
var tempConfigs2=new Array();
//allow filtering of links
jq.each(tmpConfigs,function(i1,v1){
if(props!=undefined && props!=null && v1.filter){
var filters=v1.filter.split(",");
var matched=false;
jq.each(filters,function (i2,v2){
if(!matched){
if((v2.trim()==props.trim())){
matched=true;
}
}
});
if(matched){
tempConfigs2.push(v1);
}
}else{
tempConfigs2.push(v1);
}
});
return tempConfigs2;
}
}
YAHOO.util.Event.onDOMReady(XNAT.app.crConfigs.load);
XNAT.app.crUploader={
ready:false,
show:function(config){
this.project=XNAT.app.crConfigs.project;
this.type=$(config).attr('data-type');
this.uri=$(config).attr('data-uri');
this.props=$(config).attr('data-props');
this.updateForm();
XNAT.app.crUploader.dialog.render(document.body);
XNAT.app.crUploader.dialog.show();
},
updateForm:function(obj){
var configs=XNAT.app.crConfigs.getAllConfigsByType(this.type,this.props);
$('#cruSel').html("");
$('#cruSel').append($("<option value=''>SELECT</option>"));
//render select options
$.each(configs,function(index,value){
$('#cruSel').append($("<option value='" + value.name +"' data-message='" + ((value.description)?value.description:"") +"' data-level='" + ((value.level)?value.level:"") +"' data-overwrite='" + ((value.overwrite)?value.overwrite:"") +"' data-label='" + value.label +"' data-subdir='" + value.subdir +"'>" + value.name +"</option>"));
});
if(this.registered==undefined){
this.registered=true;
//add onchange event
$('#cruSel').change(function(){
if($(this).val()!=""){
var desc=$("#cruSel option:selected").attr("data-message");
if(desc!=null && desc!=undefined){
$('#cruMsg').html(desc);
}else{
$('#cruMsg').html("");
}
}else{
$('#cruMsg').html("");
}
});
}
},
doUpload:function(allowOverwrite){
//executes the selected upload operation
var frm=document.getElementById("cru_upload_frm");
if($('#cruSel').val()==""){
showMessage("page_body","Select resource","Please select a resource to upload.");
return;
}
if(requireReason && frm.event_reason.value==""){
showMessage("page_body","Include justification.","Please include a justification for this upload.");
return;
}
if(frm.upload_file.value==""){
showMessage("page_body","Select file.","Please use the file selector to choose a file to upload.");
return;
}
YAHOO.util.Connect.setForm(frm,true);
var callback={
upload:function(obj1){
closeModalPanel("cru_upl_mdl");
this.handleUpload(obj1);
},
scope:this
}
var selector=$("#cruSel option:selected");
this.overwrite=selector.attr("data-overwrite");
this.level=selector.attr("data-level");
var params="";
params+="&event_type=WEB_FORM";
params+="&event_action=Uploaded "+ $(selector).text();
params+="&extract=true";
if(showReason && frm.event_reason.value!=""){
params+="&event_reason="+frm.event_reason.value;
}else{
params+="&event_reason=standard upload";
}
openModalPanel("cru_upl_mdl","Uploading Files");
var subdir=$(selector).attr('data-subdir');
if(subdir!=null && subdir!=undefined){
if(!subdir.startsWith("/")){
subdir="/"+subdir;
}
if(!subdir.endsWith("/")){
subdir=subdir+"/";
}
}else{
subdir="";
}
var filepath=frm.upload_file.value;
while(filepath.indexOf("\\")>-1){
filepath=filepath.substring(filepath.indexOf("\\")+1);
}
filepath=subdir+filepath;
if(allowOverwrite){
params+="&overwrite=true";
}
var lvl="";
if(this.level!='default' && this.level!='' && this.level!=undefined){
lvl="/" + this.level;
}
YAHOO.util.Connect.asyncRequest('POST',this.uri + lvl +"/resources/"+ $(selector).attr('data-label') +"/files"+filepath+"?XNAT_CSRF=" + csrfToken +params,callback);
},
handleUpload:function(response,o2,o3){
//handles the response form the upload operation
//because this is a file upload, both successes and failures will use this method
if(response.responseText==undefined || response.responseText=="" || response.responseText.match(/^<pre.*?><\/pre>$/)){
showMessage("page_body","Upload successful.","Your files have been successfully uploaded.");
document.getElementById("cru_upload_frm").upload_file.value="";
if(window.viewer!=undefined && window.viewer.loading>0){
window.viewer.refreshCatalogs();
}
}else if(response.responseText!=undefined && (response.responseText.indexOf("File already exists")>-1 || response.responseText.indexOf("duplicates")>-1)){
if(this.overwrite){
this.confirm("Duplicate files", "The uploaded files already exist on the server. Would you like to overwrite those files?",function(){
//yes
this.hide();
XNAT.app.crUploader.doUpload(true);
},function(){
//no
this.hide();
XNAT.app.crUploader.dialog.hide();
});
}else{
showMessage("page_body","Failed upload.","The selected files already exist for this session.");
}
}else if(response.responseText==undefined){
//this block is for IE. For some reason the responseText is coming through as undefined.
showMessage("page_body","Failed upload.","Unable to upload selected files.");
}else |
this.dialog.hide();
},
confirm : function (header, msg, handleYes, handleNo) {
var dialog = new YAHOO.widget.SimpleDialog('widget_confirm', {
visible:false,
width: '20em',
zIndex: 9998,
close: false,
fixedcenter: true,
modal: true,
draggable: true,
constraintoviewport: true,
icon: YAHOO.widget.SimpleDialog.ICON_WARN,
buttons: [
{ text: 'Yes', handler: handleYes},
{ text: 'No', handler: handleNo, isDefault: true }
]
});
dialog.setHeader(header);
dialog.setBody(msg);
dialog.cfg.queueProperty('icon', YAHOO.widget.SimpleDialog.ICON_HELP);
dialog.cfg.queueProperty('zIndex', 9998);
dialog.render(document.body);
dialog.show();
return dialog;
}
}
var tmpUploadFrm='<div id="cru_dialog" style="visibility:hidden">';
tmpUploadFrm+=' <div class="hd">Upload Files</div>';
tmpUploadFrm+=' <div class="bd" style="">';
tmpUploadFrm+=' <div class="cru_a">';
tmpUploadFrm+=' <form id="cru_upload_frm">';
tmpUploadFrm+=' <div>Select destination directory for uploaded files:</div>';
tmpUploadFrm+=' <div><select id="cruSel"><option value="">SELECT</option></select></div>';
tmpUploadFrm+=' <div id="cruMsg" style="padding:4px;"></div>';
tmpUploadFrm+=' <div style="margin-top:12px;margin-bottom:16px;">Local File: <input type="file" id="cru_upload_file" name="upload_file"/></div>';
if(showReason){
tmpUploadFrm+=' <div style="margin-bottom:16px;">Justification:<br><textarea id="cru_event_reason" name="event_reason" cols="50" rows="3"></textarea></div>';
}
tmpUploadFrm+=' </form>';
tmpUploadFrm+=' </div>';
tmpUploadFrm+=' </div> ';
tmpUploadFrm+='</div> ';
$("body").append(tmpUploadFrm);
//initialize modal upload dialog
XNAT.app.crUploader.dialog=new YAHOO.widget.Dialog("cru_dialog", { fixedcenter:true, visible:false, width:"500px", height:"300px", modal:true, close:true, draggable:true } ),
XNAT.app.crUploader.dialog.cfg.queueProperty("buttons", [{ text:"Cancel", handler:{fn:function(){XNAT.app.crUploader.dialog.hide();}}},{ text:"Upload", handler:{fn:function(){XNAT.app.crUploader.doUpload();}}, isDefault:true}]);
}
| {
showMessage("page_body","Failed upload.",response.responseText);
} | conditional_block |
extractdicom.go | package bulkprocess
import (
"archive/zip"
"fmt"
"image"
"image/color"
"io"
"log"
"math"
"strconv"
"cloud.google.com/go/storage"
"github.com/suyashkumar/dicom"
"github.com/suyashkumar/dicom/dicomtag"
"github.com/suyashkumar/dicom/element"
)
// ExtractDicomFromGoogleStorage fetches a dicom from within a zipped file in
// Google Storage and returns it as a native go image.Image, optionally with the
// overlay displayed on top.
func ExtractDicomFromGoogleStorage(zipPath, dicomName string, includeOverlay bool, storageClient *storage.Client) (image.Image, error) {
// Read the zip file handle into memory still compressed and turn it into an
// io.ReaderAt which is appropriate for consumption by the zip reader -
// either from a local file, or from Google storage, depending on the prefix
// you provide.
f, nbytes, err := MaybeOpenFromGoogleStorage(zipPath, storageClient)
if err != nil {
return nil, err
}
rc, err := zip.NewReader(f, nbytes)
if err != nil {
return nil, err
}
// Now we have our compressed zip data in an zip.Reader, regardless of its
// origin.
return ExtractDicomFromZipReader(rc, dicomName, includeOverlay)
}
// ExtractDicomFromLocalFile constructs a native go Image type from the dicom
// image with the given name in the given zip file. Now just wraps the
// GoogleStorage variant, since it has the capability of loading local files as
// well as remote ones.
func ExtractDicomFromLocalFile(zipPath, dicomName string, includeOverlay bool) (image.Image, error) {
return ExtractDicomFromGoogleStorage(zipPath, dicomName, includeOverlay, nil)
}
// ExtractDicomFromZipReader consumes a zip reader of the UK Biobank format,
// finds the dicom of the desired name, and returns that image, with or without
// the overlay (if any is present) based on includeOverlay.
func ExtractDicomFromZipReader(rc *zip.Reader, dicomName string, includeOverlay bool) (image.Image, error) {
for _, v := range rc.File {
// Iterate over all of the dicoms in the zip til we find the one with
// the desired name. This is reasonably efficient since we don't need to
// read all of the data to find the right name.
if v.Name != dicomName {
continue
}
dicomReader, err := v.Open()
if err != nil {
return nil, err
}
defer dicomReader.Close()
img, err := ExtractDicomFromReader(dicomReader, int64(v.UncompressedSize64), includeOverlay)
return img, err
}
return nil, fmt.Errorf("Did not find the requested Dicom %s", dicomName)
}
// ExtractDicomFromReader operates on a reader that contains one DICOM.
func ExtractDicomFromReader(dicomReader io.Reader, nReaderBytes int64, includeOverlay bool) (image.Image, error) {
opts := []func(*ExtractDicomOptions){
func(opts *ExtractDicomOptions) {
opts.IncludeOverlay = includeOverlay
},
}
return ExtractDicomFromReaderFuncOp(dicomReader, nReaderBytes, opts...)
}
func ExtractDicomFromReaderFuncOp(dicomReader io.Reader, nReaderBytes int64, options ...func(*ExtractDicomOptions)) (image.Image, error) {
opts := &ExtractDicomOptions{}
for _, opt := range options {
opt(opts)
}
p, err := dicom.NewParser(dicomReader, nReaderBytes, nil)
if err != nil {
return nil, err
}
parsedData, err := SafelyDicomParse(p, dicom.ParseOptions{
DropPixelData: false,
})
if parsedData == nil || err != nil {
return nil, fmt.Errorf("Error reading zip: %v", err)
}
var rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64
_, _, _, _ = rescaleSlope, rescaleIntercept, windowWidth, windowCenter
var bitsAllocated, bitsStored, highBit uint16
_, _, _ = bitsAllocated, bitsStored, highBit
var nOverlayRows, nOverlayCols int
var img *image.Gray16
var imgRows, imgCols int
var imgPixels []int
var overlayPixels []int
for _, elem := range parsedData.Elements {
// The typical approach is to extract bitsAllocated, bitsStored, and the highBit
// and to do transformations on the raw pixel values
if elem.Tag == dicomtag.BitsAllocated {
// log.Printf("BitsAllocated: %+v %T\n", elem.Value, elem.Value[0])
bitsAllocated = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.BitsStored {
// log.Printf("BitsStored: %+v %T\n", elem.Value, elem.Value[0])
bitsStored = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.HighBit {
// log.Printf("HighBit: %+v %T\n", elem.Value, elem.Value[0])
highBit = elem.Value[0].(uint16)
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0010}) == 0 {
nOverlayRows = int(elem.Value[0].(uint16))
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0011}) == 0 {
nOverlayCols = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Rows {
imgRows = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Columns {
imgCols = int(elem.Value[0].(uint16))
}
// If imgPixels is still uninitialized and we're on a rows or columns
// tag, and both rows and columns are populated, initialize imgPixels'
// backing array's capacity to the number of pixels in the image.
if elem.Tag == dicomtag.Rows || elem.Tag == dicomtag.Columns && imgRows*imgCols > 0 && len(imgPixels) == 0 {
imgPixels = make([]int, 0, imgRows*imgCols)
}
if elem.Tag == dicomtag.RescaleSlope {
rescaleSlope, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.RescaleIntercept {
rescaleIntercept, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowWidth {
windowWidth, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowCenter {
windowCenter, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if false {
// Keeping for debugging
if elem.Tag == dicomtag.PixelRepresentation {
log.Printf("PixelRepresentation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleSlope {
log.Printf("RescaleSlope: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleIntercept {
log.Printf("RescaleIntercept: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleType {
log.Printf("RescaleType: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PixelIntensityRelationship {
log.Printf("PixelIntensityRelationship: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PhotometricInterpretation {
log.Printf("PhotometricInterpretation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SamplesPerPixel {
log.Printf("SamplesPerPixel: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.TransferSyntaxUID {
log.Printf("TransferSyntaxUID: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SmallestImagePixelValue {
log.Printf("SmallestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.LargestImagePixelValue {
log.Printf("LargestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.VOILUTFunction {
log.Printf("VOILUTFunction: %+v %T\n", elem.Value, elem.Value[0])
}
}
// Main image
if elem.Tag == dicomtag.PixelData {
data := elem.Value[0].(element.PixelDataInfo)
for _, frame := range data.Frames {
if frame.IsEncapsulated() {
encImg, err := frame.GetImage()
if err != nil {
return nil, fmt.Errorf("Frame is encapsulated, which we did not expect. Additionally, %s", err.Error())
}
// We're done, since it's not clear how to add an overlay
return encImg, nil
}
for j := 0; j < len(frame.NativeData.Data); j++ {
imgPixels = append(imgPixels, frame.NativeData.Data[j][0])
}
}
}
// Extract the overlay, if it exists and we want it
if opts.IncludeOverlay && elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x3000}) == 0 {
// log.Println("Found the Overlay")
// log.Println("Overlay bounds:", nOverlayCols, nOverlayRows)
_, _ = nOverlayCols, nOverlayRows
// We're in the overlay data
for _, enclosed := range elem.Value {
// There should be one enclosure, and it should contain a slice of
// bytes, one byte per pixel.
cellVals, ok := enclosed.([]byte)
if !ok {
continue
}
n_bits := 8
// Fill an array with zeroes, sized the nRows * nCols ( == n_bits *
// len(cellVals) )
overlayPixels = make([]int, n_bits*len(cellVals), n_bits*len(cellVals))
// log.Println("Created a", len(overlayPixels), "array to hold the output")
for i := range cellVals {
byte_as_int := cellVals[i]
for j := 0; j < n_bits; j++ {
// Should be %cols and /cols -- row count is not necessary here
overlayPixels[i*n_bits+j] = int((byte_as_int >> uint(j)) & 1)
}
}
}
}
}
// Identify the brightest pixel
maxIntensity := 0
for _, v := range imgPixels {
if v > maxIntensity {
maxIntensity = v
}
}
// Draw the image
img = image.NewGray16(image.Rect(0, 0, imgCols, imgRows))
for j := 0; j < len(imgPixels); j++ {
leVal := imgPixels[j]
// Should be %cols and /cols -- row count is not necessary here
switch opts.WindowScaling {
case "pythonic":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyPythonicWindowScaling(leVal, maxIntensity)})
case "raw":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyNoWindowScaling(leVal)})
default:
// "official" window scaling
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyOfficialWindowScaling(leVal, rescaleSlope, rescaleIntercept, windowWidth, windowCenter, bitsAllocated)})
} | }
// Draw the overlay
if opts.IncludeOverlay && img != nil && overlayPixels != nil {
// Iterate over the bytes. There will be 1 value for each cell.
// So in a 1024x1024 overlay, you will expect 1,048,576 cells.
for i, overlayValue := range overlayPixels {
row := i / nOverlayCols
col := i % nOverlayCols
if overlayValue != 0 {
img.SetGray16(col, row, color.White)
}
}
}
return img, err
}
// See 'Grayscale Image Display' under
// https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html . In addition,
// we also scale the output so that it is appropriate for producing a 16-bit
// grayscale image. E.g., if the native dicom is 8-bit, we still rescale the
// output here for a 16-bit format. In the future, could produce 8-bit files
// where possible, in which case this function would need to be changed.
func ApplyOfficialWindowScaling(storedValue int, rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64, bitsAllocated uint16) uint16 {
// 1: StoredValue to ModalityValue
var modalityValue float64
if rescaleSlope == 0 {
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html :
// For modalities such as ultrasound and MRI that do not have any units,
// the RescaleSlope and RescaleIntercept are absent and the Modality
// Values are equal to the Stored Values.
modalityValue = float64(storedValue)
} else {
// Otherwise, we can apply the rescale slope and intercept to the stored
// value.
modalityValue = float64(storedValue)*rescaleSlope + rescaleIntercept
}
// 2: ModalityValue to WindowedValue
// The key here is that we're using bitsAllocated (e.g., 16 bits) instead of
// bitsStored (e.g., 11 bits)
var grayLevels float64
switch bitsAllocated {
// Precompute common cases so you're not exponentiating in the hot path
case 16:
grayLevels = 65536
case 8:
grayLevels = 256
default:
grayLevels = math.Pow(2, float64(bitsAllocated))
}
// We are creating a 16-bit image, so we need to scale the modality value to
// the range of 0-65535. Particularly if we're using 8-bit, then we need to
// scale the 0-255 range to 0-65535, otherwise the images will look black.
sixteenBitCorrection := math.MaxUint16 / uint16(grayLevels-1)
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html : For
// ultrasound (and for 8-bit images in general) the WindowWidth and
// WindowCenter may be absent from the file. If absent, they can be assumed
// to be 256 and 128 respectively, which provides an 8-bit identity mapping.
// Here, instead of assuming 8 bit, we use the grayLevels value.
if windowWidth == 0 && windowCenter == 0 {
windowWidth = grayLevels
windowCenter = grayLevels / 2
}
w := windowWidth - 1.0
c := windowCenter - 0.5
// Below the lower bound of our window, draw black
if modalityValue <= c-0.5*w {
return 0
}
// Above the upper bound of our window, draw white
if modalityValue > c+0.5*w {
return uint16(grayLevels-1.0) * sixteenBitCorrection
}
// Within the window, return a scaled value
return uint16(((modalityValue-c)/w+0.5)*(grayLevels-1.0)) * sixteenBitCorrection
}
func ApplyPythonicWindowScaling(intensity, maxIntensity int) uint16 {
if intensity < 0 {
intensity = 0
}
return uint16(float64(math.MaxUint16) * float64(intensity) / float64(maxIntensity))
}
func ApplyNoWindowScaling(intensity int) uint16 {
return uint16(intensity)
} | random_line_split | |
extractdicom.go | package bulkprocess
import (
"archive/zip"
"fmt"
"image"
"image/color"
"io"
"log"
"math"
"strconv"
"cloud.google.com/go/storage"
"github.com/suyashkumar/dicom"
"github.com/suyashkumar/dicom/dicomtag"
"github.com/suyashkumar/dicom/element"
)
// ExtractDicomFromGoogleStorage fetches a dicom from within a zipped file in
// Google Storage and returns it as a native go image.Image, optionally with the
// overlay displayed on top.
func ExtractDicomFromGoogleStorage(zipPath, dicomName string, includeOverlay bool, storageClient *storage.Client) (image.Image, error) {
// Read the zip file handle into memory still compressed and turn it into an
// io.ReaderAt which is appropriate for consumption by the zip reader -
// either from a local file, or from Google storage, depending on the prefix
// you provide.
f, nbytes, err := MaybeOpenFromGoogleStorage(zipPath, storageClient)
if err != nil {
return nil, err
}
rc, err := zip.NewReader(f, nbytes)
if err != nil {
return nil, err
}
// Now we have our compressed zip data in an zip.Reader, regardless of its
// origin.
return ExtractDicomFromZipReader(rc, dicomName, includeOverlay)
}
// ExtractDicomFromLocalFile constructs a native go Image type from the dicom
// image with the given name in the given zip file. Now just wraps the
// GoogleStorage variant, since it has the capability of loading local files as
// well as remote ones.
func ExtractDicomFromLocalFile(zipPath, dicomName string, includeOverlay bool) (image.Image, error) {
return ExtractDicomFromGoogleStorage(zipPath, dicomName, includeOverlay, nil)
}
// ExtractDicomFromZipReader consumes a zip reader of the UK Biobank format,
// finds the dicom of the desired name, and returns that image, with or without
// the overlay (if any is present) based on includeOverlay.
func ExtractDicomFromZipReader(rc *zip.Reader, dicomName string, includeOverlay bool) (image.Image, error) {
for _, v := range rc.File {
// Iterate over all of the dicoms in the zip til we find the one with
// the desired name. This is reasonably efficient since we don't need to
// read all of the data to find the right name.
if v.Name != dicomName {
continue
}
dicomReader, err := v.Open()
if err != nil {
return nil, err
}
defer dicomReader.Close()
img, err := ExtractDicomFromReader(dicomReader, int64(v.UncompressedSize64), includeOverlay)
return img, err
}
return nil, fmt.Errorf("Did not find the requested Dicom %s", dicomName)
}
// ExtractDicomFromReader operates on a reader that contains one DICOM.
func ExtractDicomFromReader(dicomReader io.Reader, nReaderBytes int64, includeOverlay bool) (image.Image, error) {
opts := []func(*ExtractDicomOptions){
func(opts *ExtractDicomOptions) {
opts.IncludeOverlay = includeOverlay
},
}
return ExtractDicomFromReaderFuncOp(dicomReader, nReaderBytes, opts...)
}
func ExtractDicomFromReaderFuncOp(dicomReader io.Reader, nReaderBytes int64, options ...func(*ExtractDicomOptions)) (image.Image, error) {
opts := &ExtractDicomOptions{}
for _, opt := range options {
opt(opts)
}
p, err := dicom.NewParser(dicomReader, nReaderBytes, nil)
if err != nil {
return nil, err
}
parsedData, err := SafelyDicomParse(p, dicom.ParseOptions{
DropPixelData: false,
})
if parsedData == nil || err != nil {
return nil, fmt.Errorf("Error reading zip: %v", err)
}
var rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64
_, _, _, _ = rescaleSlope, rescaleIntercept, windowWidth, windowCenter
var bitsAllocated, bitsStored, highBit uint16
_, _, _ = bitsAllocated, bitsStored, highBit
var nOverlayRows, nOverlayCols int
var img *image.Gray16
var imgRows, imgCols int
var imgPixels []int
var overlayPixels []int
for _, elem := range parsedData.Elements {
// The typical approach is to extract bitsAllocated, bitsStored, and the highBit
// and to do transformations on the raw pixel values
if elem.Tag == dicomtag.BitsAllocated {
// log.Printf("BitsAllocated: %+v %T\n", elem.Value, elem.Value[0])
bitsAllocated = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.BitsStored {
// log.Printf("BitsStored: %+v %T\n", elem.Value, elem.Value[0])
bitsStored = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.HighBit {
// log.Printf("HighBit: %+v %T\n", elem.Value, elem.Value[0])
highBit = elem.Value[0].(uint16)
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0010}) == 0 {
nOverlayRows = int(elem.Value[0].(uint16))
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0011}) == 0 {
nOverlayCols = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Rows {
imgRows = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Columns {
imgCols = int(elem.Value[0].(uint16))
}
// If imgPixels is still uninitialized and we're on a rows or columns
// tag, and both rows and columns are populated, initialize imgPixels'
// backing array's capacity to the number of pixels in the image.
if elem.Tag == dicomtag.Rows || elem.Tag == dicomtag.Columns && imgRows*imgCols > 0 && len(imgPixels) == 0 {
imgPixels = make([]int, 0, imgRows*imgCols)
}
if elem.Tag == dicomtag.RescaleSlope {
rescaleSlope, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.RescaleIntercept {
rescaleIntercept, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowWidth {
windowWidth, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowCenter {
windowCenter, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if false {
// Keeping for debugging
if elem.Tag == dicomtag.PixelRepresentation {
log.Printf("PixelRepresentation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleSlope {
log.Printf("RescaleSlope: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleIntercept {
log.Printf("RescaleIntercept: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleType {
log.Printf("RescaleType: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PixelIntensityRelationship {
log.Printf("PixelIntensityRelationship: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PhotometricInterpretation {
log.Printf("PhotometricInterpretation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SamplesPerPixel {
log.Printf("SamplesPerPixel: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.TransferSyntaxUID {
log.Printf("TransferSyntaxUID: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SmallestImagePixelValue {
log.Printf("SmallestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.LargestImagePixelValue {
log.Printf("LargestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.VOILUTFunction {
log.Printf("VOILUTFunction: %+v %T\n", elem.Value, elem.Value[0])
}
}
// Main image
if elem.Tag == dicomtag.PixelData {
data := elem.Value[0].(element.PixelDataInfo)
for _, frame := range data.Frames {
if frame.IsEncapsulated() {
encImg, err := frame.GetImage()
if err != nil {
return nil, fmt.Errorf("Frame is encapsulated, which we did not expect. Additionally, %s", err.Error())
}
// We're done, since it's not clear how to add an overlay
return encImg, nil
}
for j := 0; j < len(frame.NativeData.Data); j++ {
imgPixels = append(imgPixels, frame.NativeData.Data[j][0])
}
}
}
// Extract the overlay, if it exists and we want it
if opts.IncludeOverlay && elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x3000}) == 0 {
// log.Println("Found the Overlay")
// log.Println("Overlay bounds:", nOverlayCols, nOverlayRows)
_, _ = nOverlayCols, nOverlayRows
// We're in the overlay data
for _, enclosed := range elem.Value {
// There should be one enclosure, and it should contain a slice of
// bytes, one byte per pixel.
cellVals, ok := enclosed.([]byte)
if !ok {
continue
}
n_bits := 8
// Fill an array with zeroes, sized the nRows * nCols ( == n_bits *
// len(cellVals) )
overlayPixels = make([]int, n_bits*len(cellVals), n_bits*len(cellVals))
// log.Println("Created a", len(overlayPixels), "array to hold the output")
for i := range cellVals {
byte_as_int := cellVals[i]
for j := 0; j < n_bits; j++ {
// Should be %cols and /cols -- row count is not necessary here
overlayPixels[i*n_bits+j] = int((byte_as_int >> uint(j)) & 1)
}
}
}
}
}
// Identify the brightest pixel
maxIntensity := 0
for _, v := range imgPixels {
if v > maxIntensity {
maxIntensity = v
}
}
// Draw the image
img = image.NewGray16(image.Rect(0, 0, imgCols, imgRows))
for j := 0; j < len(imgPixels); j++ {
leVal := imgPixels[j]
// Should be %cols and /cols -- row count is not necessary here
switch opts.WindowScaling {
case "pythonic":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyPythonicWindowScaling(leVal, maxIntensity)})
case "raw":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyNoWindowScaling(leVal)})
default:
// "official" window scaling
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyOfficialWindowScaling(leVal, rescaleSlope, rescaleIntercept, windowWidth, windowCenter, bitsAllocated)})
}
}
// Draw the overlay
if opts.IncludeOverlay && img != nil && overlayPixels != nil {
// Iterate over the bytes. There will be 1 value for each cell.
// So in a 1024x1024 overlay, you will expect 1,048,576 cells.
for i, overlayValue := range overlayPixels {
row := i / nOverlayCols
col := i % nOverlayCols
if overlayValue != 0 {
img.SetGray16(col, row, color.White)
}
}
}
return img, err
}
// See 'Grayscale Image Display' under
// https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html . In addition,
// we also scale the output so that it is appropriate for producing a 16-bit
// grayscale image. E.g., if the native dicom is 8-bit, we still rescale the
// output here for a 16-bit format. In the future, could produce 8-bit files
// where possible, in which case this function would need to be changed.
func ApplyOfficialWindowScaling(storedValue int, rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64, bitsAllocated uint16) uint16 |
func ApplyPythonicWindowScaling(intensity, maxIntensity int) uint16 {
if intensity < 0 {
intensity = 0
}
return uint16(float64(math.MaxUint16) * float64(intensity) / float64(maxIntensity))
}
func ApplyNoWindowScaling(intensity int) uint16 {
return uint16(intensity)
}
| {
// 1: StoredValue to ModalityValue
var modalityValue float64
if rescaleSlope == 0 {
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html :
// For modalities such as ultrasound and MRI that do not have any units,
// the RescaleSlope and RescaleIntercept are absent and the Modality
// Values are equal to the Stored Values.
modalityValue = float64(storedValue)
} else {
// Otherwise, we can apply the rescale slope and intercept to the stored
// value.
modalityValue = float64(storedValue)*rescaleSlope + rescaleIntercept
}
// 2: ModalityValue to WindowedValue
// The key here is that we're using bitsAllocated (e.g., 16 bits) instead of
// bitsStored (e.g., 11 bits)
var grayLevels float64
switch bitsAllocated {
// Precompute common cases so you're not exponentiating in the hot path
case 16:
grayLevels = 65536
case 8:
grayLevels = 256
default:
grayLevels = math.Pow(2, float64(bitsAllocated))
}
// We are creating a 16-bit image, so we need to scale the modality value to
// the range of 0-65535. Particularly if we're using 8-bit, then we need to
// scale the 0-255 range to 0-65535, otherwise the images will look black.
sixteenBitCorrection := math.MaxUint16 / uint16(grayLevels-1)
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html : For
// ultrasound (and for 8-bit images in general) the WindowWidth and
// WindowCenter may be absent from the file. If absent, they can be assumed
// to be 256 and 128 respectively, which provides an 8-bit identity mapping.
// Here, instead of assuming 8 bit, we use the grayLevels value.
if windowWidth == 0 && windowCenter == 0 {
windowWidth = grayLevels
windowCenter = grayLevels / 2
}
w := windowWidth - 1.0
c := windowCenter - 0.5
// Below the lower bound of our window, draw black
if modalityValue <= c-0.5*w {
return 0
}
// Above the upper bound of our window, draw white
if modalityValue > c+0.5*w {
return uint16(grayLevels-1.0) * sixteenBitCorrection
}
// Within the window, return a scaled value
return uint16(((modalityValue-c)/w+0.5)*(grayLevels-1.0)) * sixteenBitCorrection
} | identifier_body |
extractdicom.go | package bulkprocess
import (
"archive/zip"
"fmt"
"image"
"image/color"
"io"
"log"
"math"
"strconv"
"cloud.google.com/go/storage"
"github.com/suyashkumar/dicom"
"github.com/suyashkumar/dicom/dicomtag"
"github.com/suyashkumar/dicom/element"
)
// ExtractDicomFromGoogleStorage fetches a dicom from within a zipped file in
// Google Storage and returns it as a native go image.Image, optionally with the
// overlay displayed on top.
func ExtractDicomFromGoogleStorage(zipPath, dicomName string, includeOverlay bool, storageClient *storage.Client) (image.Image, error) {
// Read the zip file handle into memory still compressed and turn it into an
// io.ReaderAt which is appropriate for consumption by the zip reader -
// either from a local file, or from Google storage, depending on the prefix
// you provide.
f, nbytes, err := MaybeOpenFromGoogleStorage(zipPath, storageClient)
if err != nil {
return nil, err
}
rc, err := zip.NewReader(f, nbytes)
if err != nil {
return nil, err
}
// Now we have our compressed zip data in an zip.Reader, regardless of its
// origin.
return ExtractDicomFromZipReader(rc, dicomName, includeOverlay)
}
// ExtractDicomFromLocalFile constructs a native go Image type from the dicom
// image with the given name in the given zip file. Now just wraps the
// GoogleStorage variant, since it has the capability of loading local files as
// well as remote ones.
func | (zipPath, dicomName string, includeOverlay bool) (image.Image, error) {
return ExtractDicomFromGoogleStorage(zipPath, dicomName, includeOverlay, nil)
}
// ExtractDicomFromZipReader consumes a zip reader of the UK Biobank format,
// finds the dicom of the desired name, and returns that image, with or without
// the overlay (if any is present) based on includeOverlay.
func ExtractDicomFromZipReader(rc *zip.Reader, dicomName string, includeOverlay bool) (image.Image, error) {
for _, v := range rc.File {
// Iterate over all of the dicoms in the zip til we find the one with
// the desired name. This is reasonably efficient since we don't need to
// read all of the data to find the right name.
if v.Name != dicomName {
continue
}
dicomReader, err := v.Open()
if err != nil {
return nil, err
}
defer dicomReader.Close()
img, err := ExtractDicomFromReader(dicomReader, int64(v.UncompressedSize64), includeOverlay)
return img, err
}
return nil, fmt.Errorf("Did not find the requested Dicom %s", dicomName)
}
// ExtractDicomFromReader operates on a reader that contains one DICOM.
func ExtractDicomFromReader(dicomReader io.Reader, nReaderBytes int64, includeOverlay bool) (image.Image, error) {
opts := []func(*ExtractDicomOptions){
func(opts *ExtractDicomOptions) {
opts.IncludeOverlay = includeOverlay
},
}
return ExtractDicomFromReaderFuncOp(dicomReader, nReaderBytes, opts...)
}
func ExtractDicomFromReaderFuncOp(dicomReader io.Reader, nReaderBytes int64, options ...func(*ExtractDicomOptions)) (image.Image, error) {
opts := &ExtractDicomOptions{}
for _, opt := range options {
opt(opts)
}
p, err := dicom.NewParser(dicomReader, nReaderBytes, nil)
if err != nil {
return nil, err
}
parsedData, err := SafelyDicomParse(p, dicom.ParseOptions{
DropPixelData: false,
})
if parsedData == nil || err != nil {
return nil, fmt.Errorf("Error reading zip: %v", err)
}
var rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64
_, _, _, _ = rescaleSlope, rescaleIntercept, windowWidth, windowCenter
var bitsAllocated, bitsStored, highBit uint16
_, _, _ = bitsAllocated, bitsStored, highBit
var nOverlayRows, nOverlayCols int
var img *image.Gray16
var imgRows, imgCols int
var imgPixels []int
var overlayPixels []int
for _, elem := range parsedData.Elements {
// The typical approach is to extract bitsAllocated, bitsStored, and the highBit
// and to do transformations on the raw pixel values
if elem.Tag == dicomtag.BitsAllocated {
// log.Printf("BitsAllocated: %+v %T\n", elem.Value, elem.Value[0])
bitsAllocated = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.BitsStored {
// log.Printf("BitsStored: %+v %T\n", elem.Value, elem.Value[0])
bitsStored = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.HighBit {
// log.Printf("HighBit: %+v %T\n", elem.Value, elem.Value[0])
highBit = elem.Value[0].(uint16)
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0010}) == 0 {
nOverlayRows = int(elem.Value[0].(uint16))
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0011}) == 0 {
nOverlayCols = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Rows {
imgRows = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Columns {
imgCols = int(elem.Value[0].(uint16))
}
// If imgPixels is still uninitialized and we're on a rows or columns
// tag, and both rows and columns are populated, initialize imgPixels'
// backing array's capacity to the number of pixels in the image.
if elem.Tag == dicomtag.Rows || elem.Tag == dicomtag.Columns && imgRows*imgCols > 0 && len(imgPixels) == 0 {
imgPixels = make([]int, 0, imgRows*imgCols)
}
if elem.Tag == dicomtag.RescaleSlope {
rescaleSlope, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.RescaleIntercept {
rescaleIntercept, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowWidth {
windowWidth, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowCenter {
windowCenter, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if false {
// Keeping for debugging
if elem.Tag == dicomtag.PixelRepresentation {
log.Printf("PixelRepresentation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleSlope {
log.Printf("RescaleSlope: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleIntercept {
log.Printf("RescaleIntercept: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleType {
log.Printf("RescaleType: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PixelIntensityRelationship {
log.Printf("PixelIntensityRelationship: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PhotometricInterpretation {
log.Printf("PhotometricInterpretation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SamplesPerPixel {
log.Printf("SamplesPerPixel: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.TransferSyntaxUID {
log.Printf("TransferSyntaxUID: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SmallestImagePixelValue {
log.Printf("SmallestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.LargestImagePixelValue {
log.Printf("LargestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.VOILUTFunction {
log.Printf("VOILUTFunction: %+v %T\n", elem.Value, elem.Value[0])
}
}
// Main image
if elem.Tag == dicomtag.PixelData {
data := elem.Value[0].(element.PixelDataInfo)
for _, frame := range data.Frames {
if frame.IsEncapsulated() {
encImg, err := frame.GetImage()
if err != nil {
return nil, fmt.Errorf("Frame is encapsulated, which we did not expect. Additionally, %s", err.Error())
}
// We're done, since it's not clear how to add an overlay
return encImg, nil
}
for j := 0; j < len(frame.NativeData.Data); j++ {
imgPixels = append(imgPixels, frame.NativeData.Data[j][0])
}
}
}
// Extract the overlay, if it exists and we want it
if opts.IncludeOverlay && elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x3000}) == 0 {
// log.Println("Found the Overlay")
// log.Println("Overlay bounds:", nOverlayCols, nOverlayRows)
_, _ = nOverlayCols, nOverlayRows
// We're in the overlay data
for _, enclosed := range elem.Value {
// There should be one enclosure, and it should contain a slice of
// bytes, one byte per pixel.
cellVals, ok := enclosed.([]byte)
if !ok {
continue
}
n_bits := 8
// Fill an array with zeroes, sized the nRows * nCols ( == n_bits *
// len(cellVals) )
overlayPixels = make([]int, n_bits*len(cellVals), n_bits*len(cellVals))
// log.Println("Created a", len(overlayPixels), "array to hold the output")
for i := range cellVals {
byte_as_int := cellVals[i]
for j := 0; j < n_bits; j++ {
// Should be %cols and /cols -- row count is not necessary here
overlayPixels[i*n_bits+j] = int((byte_as_int >> uint(j)) & 1)
}
}
}
}
}
// Identify the brightest pixel
maxIntensity := 0
for _, v := range imgPixels {
if v > maxIntensity {
maxIntensity = v
}
}
// Draw the image
img = image.NewGray16(image.Rect(0, 0, imgCols, imgRows))
for j := 0; j < len(imgPixels); j++ {
leVal := imgPixels[j]
// Should be %cols and /cols -- row count is not necessary here
switch opts.WindowScaling {
case "pythonic":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyPythonicWindowScaling(leVal, maxIntensity)})
case "raw":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyNoWindowScaling(leVal)})
default:
// "official" window scaling
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyOfficialWindowScaling(leVal, rescaleSlope, rescaleIntercept, windowWidth, windowCenter, bitsAllocated)})
}
}
// Draw the overlay
if opts.IncludeOverlay && img != nil && overlayPixels != nil {
// Iterate over the bytes. There will be 1 value for each cell.
// So in a 1024x1024 overlay, you will expect 1,048,576 cells.
for i, overlayValue := range overlayPixels {
row := i / nOverlayCols
col := i % nOverlayCols
if overlayValue != 0 {
img.SetGray16(col, row, color.White)
}
}
}
return img, err
}
// See 'Grayscale Image Display' under
// https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html . In addition,
// we also scale the output so that it is appropriate for producing a 16-bit
// grayscale image. E.g., if the native dicom is 8-bit, we still rescale the
// output here for a 16-bit format. In the future, could produce 8-bit files
// where possible, in which case this function would need to be changed.
func ApplyOfficialWindowScaling(storedValue int, rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64, bitsAllocated uint16) uint16 {
// 1: StoredValue to ModalityValue
var modalityValue float64
if rescaleSlope == 0 {
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html :
// For modalities such as ultrasound and MRI that do not have any units,
// the RescaleSlope and RescaleIntercept are absent and the Modality
// Values are equal to the Stored Values.
modalityValue = float64(storedValue)
} else {
// Otherwise, we can apply the rescale slope and intercept to the stored
// value.
modalityValue = float64(storedValue)*rescaleSlope + rescaleIntercept
}
// 2: ModalityValue to WindowedValue
// The key here is that we're using bitsAllocated (e.g., 16 bits) instead of
// bitsStored (e.g., 11 bits)
var grayLevels float64
switch bitsAllocated {
// Precompute common cases so you're not exponentiating in the hot path
case 16:
grayLevels = 65536
case 8:
grayLevels = 256
default:
grayLevels = math.Pow(2, float64(bitsAllocated))
}
// We are creating a 16-bit image, so we need to scale the modality value to
// the range of 0-65535. Particularly if we're using 8-bit, then we need to
// scale the 0-255 range to 0-65535, otherwise the images will look black.
sixteenBitCorrection := math.MaxUint16 / uint16(grayLevels-1)
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html : For
// ultrasound (and for 8-bit images in general) the WindowWidth and
// WindowCenter may be absent from the file. If absent, they can be assumed
// to be 256 and 128 respectively, which provides an 8-bit identity mapping.
// Here, instead of assuming 8 bit, we use the grayLevels value.
if windowWidth == 0 && windowCenter == 0 {
windowWidth = grayLevels
windowCenter = grayLevels / 2
}
w := windowWidth - 1.0
c := windowCenter - 0.5
// Below the lower bound of our window, draw black
if modalityValue <= c-0.5*w {
return 0
}
// Above the upper bound of our window, draw white
if modalityValue > c+0.5*w {
return uint16(grayLevels-1.0) * sixteenBitCorrection
}
// Within the window, return a scaled value
return uint16(((modalityValue-c)/w+0.5)*(grayLevels-1.0)) * sixteenBitCorrection
}
func ApplyPythonicWindowScaling(intensity, maxIntensity int) uint16 {
if intensity < 0 {
intensity = 0
}
return uint16(float64(math.MaxUint16) * float64(intensity) / float64(maxIntensity))
}
func ApplyNoWindowScaling(intensity int) uint16 {
return uint16(intensity)
}
| ExtractDicomFromLocalFile | identifier_name |
extractdicom.go | package bulkprocess
import (
"archive/zip"
"fmt"
"image"
"image/color"
"io"
"log"
"math"
"strconv"
"cloud.google.com/go/storage"
"github.com/suyashkumar/dicom"
"github.com/suyashkumar/dicom/dicomtag"
"github.com/suyashkumar/dicom/element"
)
// ExtractDicomFromGoogleStorage fetches a dicom from within a zipped file in
// Google Storage and returns it as a native go image.Image, optionally with the
// overlay displayed on top.
func ExtractDicomFromGoogleStorage(zipPath, dicomName string, includeOverlay bool, storageClient *storage.Client) (image.Image, error) {
// Read the zip file handle into memory still compressed and turn it into an
// io.ReaderAt which is appropriate for consumption by the zip reader -
// either from a local file, or from Google storage, depending on the prefix
// you provide.
f, nbytes, err := MaybeOpenFromGoogleStorage(zipPath, storageClient)
if err != nil {
return nil, err
}
rc, err := zip.NewReader(f, nbytes)
if err != nil {
return nil, err
}
// Now we have our compressed zip data in an zip.Reader, regardless of its
// origin.
return ExtractDicomFromZipReader(rc, dicomName, includeOverlay)
}
// ExtractDicomFromLocalFile constructs a native go Image type from the dicom
// image with the given name in the given zip file. Now just wraps the
// GoogleStorage variant, since it has the capability of loading local files as
// well as remote ones.
func ExtractDicomFromLocalFile(zipPath, dicomName string, includeOverlay bool) (image.Image, error) {
return ExtractDicomFromGoogleStorage(zipPath, dicomName, includeOverlay, nil)
}
// ExtractDicomFromZipReader consumes a zip reader of the UK Biobank format,
// finds the dicom of the desired name, and returns that image, with or without
// the overlay (if any is present) based on includeOverlay.
func ExtractDicomFromZipReader(rc *zip.Reader, dicomName string, includeOverlay bool) (image.Image, error) {
for _, v := range rc.File {
// Iterate over all of the dicoms in the zip til we find the one with
// the desired name. This is reasonably efficient since we don't need to
// read all of the data to find the right name.
if v.Name != dicomName {
continue
}
dicomReader, err := v.Open()
if err != nil {
return nil, err
}
defer dicomReader.Close()
img, err := ExtractDicomFromReader(dicomReader, int64(v.UncompressedSize64), includeOverlay)
return img, err
}
return nil, fmt.Errorf("Did not find the requested Dicom %s", dicomName)
}
// ExtractDicomFromReader operates on a reader that contains one DICOM.
func ExtractDicomFromReader(dicomReader io.Reader, nReaderBytes int64, includeOverlay bool) (image.Image, error) {
opts := []func(*ExtractDicomOptions){
func(opts *ExtractDicomOptions) {
opts.IncludeOverlay = includeOverlay
},
}
return ExtractDicomFromReaderFuncOp(dicomReader, nReaderBytes, opts...)
}
func ExtractDicomFromReaderFuncOp(dicomReader io.Reader, nReaderBytes int64, options ...func(*ExtractDicomOptions)) (image.Image, error) {
opts := &ExtractDicomOptions{}
for _, opt := range options {
opt(opts)
}
p, err := dicom.NewParser(dicomReader, nReaderBytes, nil)
if err != nil {
return nil, err
}
parsedData, err := SafelyDicomParse(p, dicom.ParseOptions{
DropPixelData: false,
})
if parsedData == nil || err != nil {
return nil, fmt.Errorf("Error reading zip: %v", err)
}
var rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64
_, _, _, _ = rescaleSlope, rescaleIntercept, windowWidth, windowCenter
var bitsAllocated, bitsStored, highBit uint16
_, _, _ = bitsAllocated, bitsStored, highBit
var nOverlayRows, nOverlayCols int
var img *image.Gray16
var imgRows, imgCols int
var imgPixels []int
var overlayPixels []int
for _, elem := range parsedData.Elements {
// The typical approach is to extract bitsAllocated, bitsStored, and the highBit
// and to do transformations on the raw pixel values
if elem.Tag == dicomtag.BitsAllocated {
// log.Printf("BitsAllocated: %+v %T\n", elem.Value, elem.Value[0])
bitsAllocated = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.BitsStored {
// log.Printf("BitsStored: %+v %T\n", elem.Value, elem.Value[0])
bitsStored = elem.Value[0].(uint16)
} else if elem.Tag == dicomtag.HighBit {
// log.Printf("HighBit: %+v %T\n", elem.Value, elem.Value[0])
highBit = elem.Value[0].(uint16)
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0010}) == 0 {
nOverlayRows = int(elem.Value[0].(uint16))
} else if elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x0011}) == 0 {
nOverlayCols = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Rows {
imgRows = int(elem.Value[0].(uint16))
} else if elem.Tag == dicomtag.Columns {
imgCols = int(elem.Value[0].(uint16))
}
// If imgPixels is still uninitialized and we're on a rows or columns
// tag, and both rows and columns are populated, initialize imgPixels'
// backing array's capacity to the number of pixels in the image.
if elem.Tag == dicomtag.Rows || elem.Tag == dicomtag.Columns && imgRows*imgCols > 0 && len(imgPixels) == 0 {
imgPixels = make([]int, 0, imgRows*imgCols)
}
if elem.Tag == dicomtag.RescaleSlope {
rescaleSlope, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.RescaleIntercept {
rescaleIntercept, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowWidth {
windowWidth, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if elem.Tag == dicomtag.WindowCenter {
windowCenter, err = strconv.ParseFloat(elem.Value[0].(string), 64)
if err != nil {
log.Println(err)
}
}
if false {
// Keeping for debugging
if elem.Tag == dicomtag.PixelRepresentation {
log.Printf("PixelRepresentation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleSlope {
log.Printf("RescaleSlope: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleIntercept {
log.Printf("RescaleIntercept: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.RescaleType {
log.Printf("RescaleType: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PixelIntensityRelationship {
log.Printf("PixelIntensityRelationship: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.PhotometricInterpretation {
log.Printf("PhotometricInterpretation: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SamplesPerPixel {
log.Printf("SamplesPerPixel: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.TransferSyntaxUID {
log.Printf("TransferSyntaxUID: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.SmallestImagePixelValue {
log.Printf("SmallestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.LargestImagePixelValue {
log.Printf("LargestImagePixelValue: %+v %T\n", elem.Value, elem.Value[0])
} else if elem.Tag == dicomtag.VOILUTFunction {
log.Printf("VOILUTFunction: %+v %T\n", elem.Value, elem.Value[0])
}
}
// Main image
if elem.Tag == dicomtag.PixelData {
data := elem.Value[0].(element.PixelDataInfo)
for _, frame := range data.Frames {
if frame.IsEncapsulated() {
encImg, err := frame.GetImage()
if err != nil {
return nil, fmt.Errorf("Frame is encapsulated, which we did not expect. Additionally, %s", err.Error())
}
// We're done, since it's not clear how to add an overlay
return encImg, nil
}
for j := 0; j < len(frame.NativeData.Data); j++ {
imgPixels = append(imgPixels, frame.NativeData.Data[j][0])
}
}
}
// Extract the overlay, if it exists and we want it
if opts.IncludeOverlay && elem.Tag.Compare(dicomtag.Tag{Group: 0x6000, Element: 0x3000}) == 0 {
// log.Println("Found the Overlay")
// log.Println("Overlay bounds:", nOverlayCols, nOverlayRows)
_, _ = nOverlayCols, nOverlayRows
// We're in the overlay data
for _, enclosed := range elem.Value {
// There should be one enclosure, and it should contain a slice of
// bytes, one byte per pixel.
cellVals, ok := enclosed.([]byte)
if !ok {
continue
}
n_bits := 8
// Fill an array with zeroes, sized the nRows * nCols ( == n_bits *
// len(cellVals) )
overlayPixels = make([]int, n_bits*len(cellVals), n_bits*len(cellVals))
// log.Println("Created a", len(overlayPixels), "array to hold the output")
for i := range cellVals {
byte_as_int := cellVals[i]
for j := 0; j < n_bits; j++ {
// Should be %cols and /cols -- row count is not necessary here
overlayPixels[i*n_bits+j] = int((byte_as_int >> uint(j)) & 1)
}
}
}
}
}
// Identify the brightest pixel
maxIntensity := 0
for _, v := range imgPixels {
if v > maxIntensity {
maxIntensity = v
}
}
// Draw the image
img = image.NewGray16(image.Rect(0, 0, imgCols, imgRows))
for j := 0; j < len(imgPixels); j++ {
leVal := imgPixels[j]
// Should be %cols and /cols -- row count is not necessary here
switch opts.WindowScaling {
case "pythonic":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyPythonicWindowScaling(leVal, maxIntensity)})
case "raw":
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyNoWindowScaling(leVal)})
default:
// "official" window scaling
img.SetGray16(j%imgCols, j/imgCols, color.Gray16{Y: ApplyOfficialWindowScaling(leVal, rescaleSlope, rescaleIntercept, windowWidth, windowCenter, bitsAllocated)})
}
}
// Draw the overlay
if opts.IncludeOverlay && img != nil && overlayPixels != nil {
// Iterate over the bytes. There will be 1 value for each cell.
// So in a 1024x1024 overlay, you will expect 1,048,576 cells.
for i, overlayValue := range overlayPixels |
}
return img, err
}
// See 'Grayscale Image Display' under
// https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html . In addition,
// we also scale the output so that it is appropriate for producing a 16-bit
// grayscale image. E.g., if the native dicom is 8-bit, we still rescale the
// output here for a 16-bit format. In the future, could produce 8-bit files
// where possible, in which case this function would need to be changed.
func ApplyOfficialWindowScaling(storedValue int, rescaleSlope, rescaleIntercept, windowWidth, windowCenter float64, bitsAllocated uint16) uint16 {
// 1: StoredValue to ModalityValue
var modalityValue float64
if rescaleSlope == 0 {
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html :
// For modalities such as ultrasound and MRI that do not have any units,
// the RescaleSlope and RescaleIntercept are absent and the Modality
// Values are equal to the Stored Values.
modalityValue = float64(storedValue)
} else {
// Otherwise, we can apply the rescale slope and intercept to the stored
// value.
modalityValue = float64(storedValue)*rescaleSlope + rescaleIntercept
}
// 2: ModalityValue to WindowedValue
// The key here is that we're using bitsAllocated (e.g., 16 bits) instead of
// bitsStored (e.g., 11 bits)
var grayLevels float64
switch bitsAllocated {
// Precompute common cases so you're not exponentiating in the hot path
case 16:
grayLevels = 65536
case 8:
grayLevels = 256
default:
grayLevels = math.Pow(2, float64(bitsAllocated))
}
// We are creating a 16-bit image, so we need to scale the modality value to
// the range of 0-65535. Particularly if we're using 8-bit, then we need to
// scale the 0-255 range to 0-65535, otherwise the images will look black.
sixteenBitCorrection := math.MaxUint16 / uint16(grayLevels-1)
// Via https://dgobbi.github.io/vtk-dicom/doc/api/image_display.html : For
// ultrasound (and for 8-bit images in general) the WindowWidth and
// WindowCenter may be absent from the file. If absent, they can be assumed
// to be 256 and 128 respectively, which provides an 8-bit identity mapping.
// Here, instead of assuming 8 bit, we use the grayLevels value.
if windowWidth == 0 && windowCenter == 0 {
windowWidth = grayLevels
windowCenter = grayLevels / 2
}
w := windowWidth - 1.0
c := windowCenter - 0.5
// Below the lower bound of our window, draw black
if modalityValue <= c-0.5*w {
return 0
}
// Above the upper bound of our window, draw white
if modalityValue > c+0.5*w {
return uint16(grayLevels-1.0) * sixteenBitCorrection
}
// Within the window, return a scaled value
return uint16(((modalityValue-c)/w+0.5)*(grayLevels-1.0)) * sixteenBitCorrection
}
func ApplyPythonicWindowScaling(intensity, maxIntensity int) uint16 {
if intensity < 0 {
intensity = 0
}
return uint16(float64(math.MaxUint16) * float64(intensity) / float64(maxIntensity))
}
func ApplyNoWindowScaling(intensity int) uint16 {
return uint16(intensity)
}
| {
row := i / nOverlayCols
col := i % nOverlayCols
if overlayValue != 0 {
img.SetGray16(col, row, color.White)
}
} | conditional_block |
lib.rs | //! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive to allocate objects or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from(reusable_buff);
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset, they are returned but NOT reset
//! You may want to call `object.reset()` or `object.clear()`
//! or any other equivalent for the object that you are using, after pulling from the pool
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
mod multi_buf;
mod semphore;
pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;
pub type Stack<T> = Vec<T>;
pub struct PendingInfo<T>
where T: Sync + Send + 'static
{
id: String,
notifier: channel::Sender<T>,
}
pub struct WaitingInfo<T>
where T: Sync + Send + 'static
{
id: String,
//发送恢复命令
notifier: channel::Sender<T>,
///最低需要多少个内存单元才能恢复
min_request: usize,
}
pub struct MemoryPool<T>
where T: Sync + Send + 'static
{
objects: (channel::Sender<T>, channel::Receiver<T>),
// the one wait for data
pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
///those who is sleeping
waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
run_block: Arc<Mutex<()>>,
pending_block: Arc<Mutex<()>>,
// recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}
impl<T> MemoryPool<T> where T: Sync + Send + 'static {
#[inline]
pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
where
F: Fn() -> T,
{
// //println!("mempool remains:{}", cap);
log::trace!("mempool remains:{}", cap);
let mut objects = channel::unbounded();
for _ in 0..cap {
&objects.0.send(init());
}
MemoryPool {
objects,
pending: Arc::new(Mutex::new(Vec::new())),
waiting: Arc::new(Mutex::new(Vec::new())),
run_block: Arc::new(Mutex::new(())),
pending_block: Arc::new(Mutex::new(())),
}
}
#[inline]
pub fn len(&self) -> usize {
self.objects.1.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.objects.1.is_empty()
}
#[inline]
pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
log::trace!("pending item:{}", str);
let _x = self.pending_block.lock();
let ret = if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
(Some(Reusable::new(&self, item)), false)
/* } else if (self.pending.lock().len() == 0) {
log::trace!("get should pend:{}", str);
self.pending.lock().push(PendingInfo {
id: String::from(str),
notifier: sender.clone(),
});
(None, false)*/
} else {
let to_retry = { self.waiting.lock().len() * 60 + 2 };
log::trace!("try again :{} with retries backoff:{}", str, to_retry);
for i in 0..to_retry {
sleep(std::time::Duration::from_secs(1));
if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
return (Some(Reusable::new(&self, item)), false);
}
}
log::trace!("get should sleep :{}", str);
self.waiting.lock().push(WaitingInfo {
id: String::from(str),
notifier: sender.clone(),
min_request: releasable,
});
(None, true)
};
ret
}
#[inline]
pub fn attach(&'static self, t: T) {
let | elf.run_block.lock();
log::trace!("attach started<<<<<<<<<<<<<<<<");
log::trace!("recyled an item ");
let mut wait_list = { self.waiting.lock() };
log::trace!("check waiting list ok :{}", wait_list.len());
if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
log::trace!("remove ok<<<<<<<<<<<<<<< ");
let item = wait_list.remove(0);
log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
//&wait_list.remove(0);
self.objects.0.send(t).unwrap();
log::trace!("free cnts:{}, waking up {}/ with min req:{} now.... ", self.len(), item.id.clone(), item.min_request);
for i in 0..item.min_request + 1 {
item.notifier.send(Reusable::new(&self, self.objects.1.recv().unwrap())).unwrap_or_else(|e|{
log::warn!("notifier send failed");
});
}
drop(item);
// thread::spawn(move || {
// item.notifier.send(()).unwrap();
// });
} else if self.pending.lock().len() > 0 {
drop(wait_list);
let pending_item = self.pending.lock().remove(0);
log::trace!("fill pending:{}", pending_item.id);
// thread::spawn(move || {
// pending_item.notifier.send(());
// });
pending_item.notifier.send(Reusable::new(&self, t));
} else {
// drop(wait_list);
self.objects.0.send(t).unwrap();
log::trace!("push to queue:{}", self.len());
}
}
}
pub struct Reusable<T>
where T: Sync + Send + 'static {
pool: &'static MemoryPool<T>,
data: ManuallyDrop<T>,
}
impl<T> Reusable<T>
where T: Sync + Send + 'static {
#[inline]
pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
Self {
pool,
data: ManuallyDrop::new(t),
}
}
// #[inline]
// pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
// let ret = unsafe { (self.pool, self.take()) };
// forget(self);
// ret
// }
//
unsafe fn take(&mut self) -> T {
ManuallyDrop::take(&mut self.data)
}
}
impl<T> Deref for Reusable<T>
where T: Sync + Send + 'static
{
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> DerefMut for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
impl<T> Drop for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn drop(&mut self) {
unsafe { self.pool.attach(self.take()); }
}
}
#[cfg(test)]
mod tests {
use crate::{MemoryPool, Reusable};
use std::mem::drop;
use std::ops::DerefMut;
use std::thread;
use std::sync::Arc;
// #[test]
// fn pull() {
// let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
// let pool2 = pool.clone();
// let t1 = thread::spawn(move ||{
// let object1 = pool.lock().pull();
// //println!("retain 1");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object2 = pool.pull();
// //println!("retain 2");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object3 = pool.pull();
// //println!("retain 3");
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 1");
// drop(object1);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 2");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 3");
// drop(object3);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// let t2 = thread::spawn(move ||{
// //println!(">>>wait for 2.5s");
// thread::sleep(std::time::Duration::from_millis(2500));
// //println!(">>>try to retain 1.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 2.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 3.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
//
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!(">>>dropped");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// t1.join();
// t2.join();
//
// }
#[test]
fn e2e() {
// let pool = MemoryPool::new(10, || Vec::new());
// let mut objects = Vec::new();
//
// thread::spawn(||{
// for i in 0..10 {
// let mut object = pool.pull();
// }
// });
//
//
//
// drop(objects);
//
//
// for i in 10..0 {
// let mut object = pool.objects.lock().pop().unwrap();
// assert_eq!(object.pop(), Some(i));
// }
}
}
| _x = s | identifier_name |
lib.rs | //! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive to allocate objects or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from(reusable_buff);
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset, they are returned but NOT reset
//! You may want to call `object.reset()` or `object.clear()`
//! or any other equivalent for the object that you are using, after pulling from the pool
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
mod multi_buf;
mod semphore;
pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;
pub type Stack<T> = Vec<T>;
pub struct PendingInfo<T>
where T: Sync + Send + 'static
{
id: String,
notifier: channel::Sender<T>,
}
pub struct WaitingInfo<T>
where T: Sync + Send + 'static
{
id: String,
//发送恢复命令
notifier: channel::Sender<T>,
///最低需要多少个内存单元才能恢复
min_request: usize,
}
pub struct MemoryPool<T>
where T: Sync + Send + 'static
{
objects: (channel::Sender<T>, channel::Receiver<T>),
// the one wait for data
pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
///those who is sleeping
waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
run_block: Arc<Mutex<()>>,
pending_block: Arc<Mutex<()>>,
// recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}
impl<T> MemoryPool<T> where T: Sync + Send + 'static {
#[inline]
pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
where
F: Fn() -> T,
{
// //println!("mempool remains:{}", cap);
log::trace!("mempool remains:{}", cap);
let mut objects = channel::unbounded();
for _ in 0..cap {
&objects.0.send(init());
}
MemoryPool {
objects,
pending: Arc::new(Mutex::new(Vec::new())),
waiting: Arc::new(Mutex::new(Vec::new())),
run_block: Arc::new(Mutex::new(())),
pending_block: Arc::new(Mutex::new(())),
}
}
#[inline]
pub fn len(&self) -> usize {
self.objects.1.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.objects.1.is_empty()
}
#[inline]
pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
log::trace!("pending item:{}", str);
let _x = self.pending_block.lock();
let ret = if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str | waiting.lock().len() * 60 + 2 };
log::trace!("try again :{} with retries backoff:{}", str, to_retry);
for i in 0..to_retry {
sleep(std::time::Duration::from_secs(1));
if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
return (Some(Reusable::new(&self, item)), false);
}
}
log::trace!("get should sleep :{}", str);
self.waiting.lock().push(WaitingInfo {
id: String::from(str),
notifier: sender.clone(),
min_request: releasable,
});
(None, true)
};
ret
}
#[inline]
pub fn attach(&'static self, t: T) {
let _x = self.run_block.lock();
log::trace!("attach started<<<<<<<<<<<<<<<<");
log::trace!("recyled an item ");
let mut wait_list = { self.waiting.lock() };
log::trace!("check waiting list ok :{}", wait_list.len());
if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
log::trace!("remove ok<<<<<<<<<<<<<<< ");
let item = wait_list.remove(0);
log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
//&wait_list.remove(0);
self.objects.0.send(t).unwrap();
log::trace!("free cnts:{}, waking up {}/ with min req:{} now.... ", self.len(), item.id.clone(), item.min_request);
for i in 0..item.min_request + 1 {
item.notifier.send(Reusable::new(&self, self.objects.1.recv().unwrap())).unwrap_or_else(|e|{
log::warn!("notifier send failed");
});
}
drop(item);
// thread::spawn(move || {
// item.notifier.send(()).unwrap();
// });
} else if self.pending.lock().len() > 0 {
drop(wait_list);
let pending_item = self.pending.lock().remove(0);
log::trace!("fill pending:{}", pending_item.id);
// thread::spawn(move || {
// pending_item.notifier.send(());
// });
pending_item.notifier.send(Reusable::new(&self, t));
} else {
// drop(wait_list);
self.objects.0.send(t).unwrap();
log::trace!("push to queue:{}", self.len());
}
}
}
pub struct Reusable<T>
where T: Sync + Send + 'static {
pool: &'static MemoryPool<T>,
data: ManuallyDrop<T>,
}
impl<T> Reusable<T>
where T: Sync + Send + 'static {
#[inline]
pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
Self {
pool,
data: ManuallyDrop::new(t),
}
}
// #[inline]
// pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
// let ret = unsafe { (self.pool, self.take()) };
// forget(self);
// ret
// }
//
unsafe fn take(&mut self) -> T {
ManuallyDrop::take(&mut self.data)
}
}
impl<T> Deref for Reusable<T>
where T: Sync + Send + 'static
{
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> DerefMut for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
impl<T> Drop for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn drop(&mut self) {
unsafe { self.pool.attach(self.take()); }
}
}
#[cfg(test)]
mod tests {
use crate::{MemoryPool, Reusable};
use std::mem::drop;
use std::ops::DerefMut;
use std::thread;
use std::sync::Arc;
// #[test]
// fn pull() {
// let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
// let pool2 = pool.clone();
// let t1 = thread::spawn(move ||{
// let object1 = pool.lock().pull();
// //println!("retain 1");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object2 = pool.pull();
// //println!("retain 2");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object3 = pool.pull();
// //println!("retain 3");
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 1");
// drop(object1);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 2");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 3");
// drop(object3);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// let t2 = thread::spawn(move ||{
// //println!(">>>wait for 2.5s");
// thread::sleep(std::time::Duration::from_millis(2500));
// //println!(">>>try to retain 1.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 2.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 3.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
//
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!(">>>dropped");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// t1.join();
// t2.join();
//
// }
#[test]
fn e2e() {
// let pool = MemoryPool::new(10, || Vec::new());
// let mut objects = Vec::new();
//
// thread::spawn(||{
// for i in 0..10 {
// let mut object = pool.pull();
// }
// });
//
//
//
// drop(objects);
//
//
// for i in 10..0 {
// let mut object = pool.objects.lock().pop().unwrap();
// assert_eq!(object.pop(), Some(i));
// }
}
}
| );
(Some(Reusable::new(&self, item)), false)
/* } else if (self.pending.lock().len() == 0) {
log::trace!("get should pend:{}", str);
self.pending.lock().push(PendingInfo {
id: String::from(str),
notifier: sender.clone(),
});
(None, false)*/
} else {
let to_retry = { self. | conditional_block |
lib.rs | //! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive to allocate objects or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from(reusable_buff);
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset, they are returned but NOT reset
//! You may want to call `object.reset()` or `object.clear()`
//! or any other equivalent for the object that you are using, after pulling from the pool
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
mod multi_buf;
mod semphore;
pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;
pub type Stack<T> = Vec<T>;
pub struct PendingInfo<T>
where T: Sync + Send + 'static
{
id: String,
notifier: channel::Sender<T>,
}
pub struct WaitingInfo<T>
where T: Sync + Send + 'static
{
id: String,
//发送恢复命令
notifier: channel::Sender<T>,
///最低需要多少个内存单元才能恢复
min_request: usize,
}
pub struct MemoryPool<T>
where T: Sync + Send + 'static
{
objects: (channel::Sender<T>, channel::Receiver<T>),
// the one wait for data
pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
///those who is sleeping
waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
run_block: Arc<Mutex<()>>,
pending_block: Arc<Mutex<()>>,
// recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}
impl<T> MemoryPool<T> where T: Sync + Send + 'static {
#[inline]
pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
where
F: Fn() -> T,
{
// //println!("mempool remains: | usize {
self.objects.1.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.objects.1.is_empty()
}
#[inline]
pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
log::trace!("pending item:{}", str);
let _x = self.pending_block.lock();
let ret = if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
(Some(Reusable::new(&self, item)), false)
/* } else if (self.pending.lock().len() == 0) {
log::trace!("get should pend:{}", str);
self.pending.lock().push(PendingInfo {
id: String::from(str),
notifier: sender.clone(),
});
(None, false)*/
} else {
let to_retry = { self.waiting.lock().len() * 60 + 2 };
log::trace!("try again :{} with retries backoff:{}", str, to_retry);
for i in 0..to_retry {
sleep(std::time::Duration::from_secs(1));
if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
return (Some(Reusable::new(&self, item)), false);
}
}
log::trace!("get should sleep :{}", str);
self.waiting.lock().push(WaitingInfo {
id: String::from(str),
notifier: sender.clone(),
min_request: releasable,
});
(None, true)
};
ret
}
#[inline]
pub fn attach(&'static self, t: T) {
let _x = self.run_block.lock();
log::trace!("attach started<<<<<<<<<<<<<<<<");
log::trace!("recyled an item ");
let mut wait_list = { self.waiting.lock() };
log::trace!("check waiting list ok :{}", wait_list.len());
if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
log::trace!("remove ok<<<<<<<<<<<<<<< ");
let item = wait_list.remove(0);
log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
//&wait_list.remove(0);
self.objects.0.send(t).unwrap();
log::trace!("free cnts:{}, waking up {}/ with min req:{} now.... ", self.len(), item.id.clone(), item.min_request);
for i in 0..item.min_request + 1 {
item.notifier.send(Reusable::new(&self, self.objects.1.recv().unwrap())).unwrap_or_else(|e|{
log::warn!("notifier send failed");
});
}
drop(item);
// thread::spawn(move || {
// item.notifier.send(()).unwrap();
// });
} else if self.pending.lock().len() > 0 {
drop(wait_list);
let pending_item = self.pending.lock().remove(0);
log::trace!("fill pending:{}", pending_item.id);
// thread::spawn(move || {
// pending_item.notifier.send(());
// });
pending_item.notifier.send(Reusable::new(&self, t));
} else {
// drop(wait_list);
self.objects.0.send(t).unwrap();
log::trace!("push to queue:{}", self.len());
}
}
}
pub struct Reusable<T>
where T: Sync + Send + 'static {
pool: &'static MemoryPool<T>,
data: ManuallyDrop<T>,
}
impl<T> Reusable<T>
where T: Sync + Send + 'static {
#[inline]
pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
Self {
pool,
data: ManuallyDrop::new(t),
}
}
// #[inline]
// pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
// let ret = unsafe { (self.pool, self.take()) };
// forget(self);
// ret
// }
//
unsafe fn take(&mut self) -> T {
ManuallyDrop::take(&mut self.data)
}
}
impl<T> Deref for Reusable<T>
where T: Sync + Send + 'static
{
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> DerefMut for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
impl<T> Drop for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn drop(&mut self) {
unsafe { self.pool.attach(self.take()); }
}
}
#[cfg(test)]
mod tests {
use crate::{MemoryPool, Reusable};
use std::mem::drop;
use std::ops::DerefMut;
use std::thread;
use std::sync::Arc;
// #[test]
// fn pull() {
// let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
// let pool2 = pool.clone();
// let t1 = thread::spawn(move ||{
// let object1 = pool.lock().pull();
// //println!("retain 1");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object2 = pool.pull();
// //println!("retain 2");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object3 = pool.pull();
// //println!("retain 3");
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 1");
// drop(object1);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 2");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 3");
// drop(object3);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// let t2 = thread::spawn(move ||{
// //println!(">>>wait for 2.5s");
// thread::sleep(std::time::Duration::from_millis(2500));
// //println!(">>>try to retain 1.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 2.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 3.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
//
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!(">>>dropped");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// t1.join();
// t2.join();
//
// }
#[test]
fn e2e() {
// let pool = MemoryPool::new(10, || Vec::new());
// let mut objects = Vec::new();
//
// thread::spawn(||{
// for i in 0..10 {
// let mut object = pool.pull();
// }
// });
//
//
//
// drop(objects);
//
//
// for i in 10..0 {
// let mut object = pool.objects.lock().pop().unwrap();
// assert_eq!(object.pop(), Some(i));
// }
}
}
| {}", cap);
log::trace!("mempool remains:{}", cap);
let mut objects = channel::unbounded();
for _ in 0..cap {
&objects.0.send(init());
}
MemoryPool {
objects,
pending: Arc::new(Mutex::new(Vec::new())),
waiting: Arc::new(Mutex::new(Vec::new())),
run_block: Arc::new(Mutex::new(())),
pending_block: Arc::new(Mutex::new(())),
}
}
#[inline]
pub fn len(&self) -> | identifier_body |
lib.rs | //! A thread-safe object pool with automatic return and attach/detach semantics
//!
//! The goal of an object pool is to reuse expensive to allocate objects or frequently allocated objects
//!
//! # Examples
//!
//! ## Creating a Pool
//!
//! The general pool creation looks like this
//! ```
//! let pool: MemPool<T> = MemoryPool::new(capacity, || T::new());
//! ```
//! Example pool with 32 `Vec<u8>` with capacity of 4096
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! ```
//!
//! ## Using a Pool
//!
//! Basic usage for pulling from the pool
//! ``` | //! some_file.read_to_end(reusable_buff);
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//! Pull from pool and `detach()`
//! ```
//! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using
//! let (pool, reusable_buff) = reusable_buff.detach();
//! let mut s = String::from(reusable_buff);
//! s.push_str("hello, world!");
//! pool.attach(s.into_bytes()); // reattach the buffer before reusable goes out of scope
//! // reusable_buff is automatically returned to the pool when it goes out of scope
//! ```
//!
//! ## Using Across Threads
//!
//! You simply wrap the pool in a [`std::sync::Arc`]
//! ```
//! let pool: Arc<MemoryPool<T>> = Arc::new(MemoryPool::new(cap, || T::new()));
//! ```
//!
//! # Warning
//!
//! Objects in the pool are not automatically reset, they are returned but NOT reset
//! You may want to call `object.reset()` or `object.clear()`
//! or any other equivalent for the object that you are using, after pulling from the pool
//!
//! [`std::sync::Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
mod multi_buf;
mod semphore;
pub use multi_buf::{MultiBuffer, GetSegs};
use crossbeam::channel;
use std::ops::{Deref, DerefMut};
use parking_lot::{Mutex, Condvar};
use std::mem::{ManuallyDrop, forget};
use std::sync::Arc;
use std::thread;
use log::{trace};
pub use semphore::Semphore;
use parking_lot::lock_api::MutexGuard;
use futures::SinkExt;
use std::thread::sleep;
pub type Stack<T> = Vec<T>;
pub struct PendingInfo<T>
where T: Sync + Send + 'static
{
id: String,
notifier: channel::Sender<T>,
}
pub struct WaitingInfo<T>
where T: Sync + Send + 'static
{
id: String,
//发送恢复命令
notifier: channel::Sender<T>,
///最低需要多少个内存单元才能恢复
min_request: usize,
}
pub struct MemoryPool<T>
where T: Sync + Send + 'static
{
objects: (channel::Sender<T>, channel::Receiver<T>),
// the one wait for data
pending: Arc<Mutex<Vec<PendingInfo<Reusable<T>>>>>,
///those who is sleeping
waiting: Arc<Mutex<Vec<WaitingInfo<Reusable<T>>>>>,
run_block: Arc<Mutex<()>>,
pending_block: Arc<Mutex<()>>,
// recycle: (channel::Sender<Reusable<'a,T>>, channel::Receiver<Reusable<'a,T>>),
}
impl<T> MemoryPool<T> where T: Sync + Send + 'static {
#[inline]
pub fn new<F>(cap: usize, init: F) -> MemoryPool<T>
where
F: Fn() -> T,
{
// //println!("mempool remains:{}", cap);
log::trace!("mempool remains:{}", cap);
let mut objects = channel::unbounded();
for _ in 0..cap {
&objects.0.send(init());
}
MemoryPool {
objects,
pending: Arc::new(Mutex::new(Vec::new())),
waiting: Arc::new(Mutex::new(Vec::new())),
run_block: Arc::new(Mutex::new(())),
pending_block: Arc::new(Mutex::new(())),
}
}
#[inline]
pub fn len(&self) -> usize {
self.objects.1.len()
}
#[inline]
pub fn is_empty(&self) -> bool {
self.objects.1.is_empty()
}
#[inline]
pub fn pending(&'static self, str: &str, sender: channel::Sender<Reusable<T>>, releasable: usize) -> (Option<Reusable<T>>, bool) {
log::trace!("pending item:{}", str);
let _x = self.pending_block.lock();
let ret = if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
(Some(Reusable::new(&self, item)), false)
/* } else if (self.pending.lock().len() == 0) {
log::trace!("get should pend:{}", str);
self.pending.lock().push(PendingInfo {
id: String::from(str),
notifier: sender.clone(),
});
(None, false)*/
} else {
let to_retry = { self.waiting.lock().len() * 60 + 2 };
log::trace!("try again :{} with retries backoff:{}", str, to_retry);
for i in 0..to_retry {
sleep(std::time::Duration::from_secs(1));
if let Ok(item) = self.objects.1.try_recv() {
log::trace!("get ok:{}", str);
return (Some(Reusable::new(&self, item)), false);
}
}
log::trace!("get should sleep :{}", str);
self.waiting.lock().push(WaitingInfo {
id: String::from(str),
notifier: sender.clone(),
min_request: releasable,
});
(None, true)
};
ret
}
#[inline]
pub fn attach(&'static self, t: T) {
let _x = self.run_block.lock();
log::trace!("attach started<<<<<<<<<<<<<<<<");
log::trace!("recyled an item ");
let mut wait_list = { self.waiting.lock() };
log::trace!("check waiting list ok :{}", wait_list.len());
if wait_list.len() > 0 && self.len() >= wait_list[0].min_request {
log::trace!("remove ok<<<<<<<<<<<<<<< ");
let item = wait_list.remove(0);
log::trace!("start wakeup<<<<<<<<<<<<<<<<<<<");
//&wait_list.remove(0);
self.objects.0.send(t).unwrap();
log::trace!("free cnts:{}, waking up {}/ with min req:{} now.... ", self.len(), item.id.clone(), item.min_request);
for i in 0..item.min_request + 1 {
item.notifier.send(Reusable::new(&self, self.objects.1.recv().unwrap())).unwrap_or_else(|e|{
log::warn!("notifier send failed");
});
}
drop(item);
// thread::spawn(move || {
// item.notifier.send(()).unwrap();
// });
} else if self.pending.lock().len() > 0 {
drop(wait_list);
let pending_item = self.pending.lock().remove(0);
log::trace!("fill pending:{}", pending_item.id);
// thread::spawn(move || {
// pending_item.notifier.send(());
// });
pending_item.notifier.send(Reusable::new(&self, t));
} else {
// drop(wait_list);
self.objects.0.send(t).unwrap();
log::trace!("push to queue:{}", self.len());
}
}
}
pub struct Reusable<T>
where T: Sync + Send + 'static {
pool: &'static MemoryPool<T>,
data: ManuallyDrop<T>,
}
impl<T> Reusable<T>
where T: Sync + Send + 'static {
#[inline]
pub fn new(pool: &'static MemoryPool<T>, t: T) -> Self {
Self {
pool,
data: ManuallyDrop::new(t),
}
}
// #[inline]
// pub fn detach(mut self) -> (&'a MemoryPool<T>, T) {
// let ret = unsafe { (self.pool, self.take()) };
// forget(self);
// ret
// }
//
unsafe fn take(&mut self) -> T {
ManuallyDrop::take(&mut self.data)
}
}
impl<T> Deref for Reusable<T>
where T: Sync + Send + 'static
{
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.data
}
}
impl<T> DerefMut for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.data
}
}
impl<T> Drop for Reusable<T>
where T: Sync + Send + 'static
{
#[inline]
fn drop(&mut self) {
unsafe { self.pool.attach(self.take()); }
}
}
#[cfg(test)]
mod tests {
use crate::{MemoryPool, Reusable};
use std::mem::drop;
use std::ops::DerefMut;
use std::thread;
use std::sync::Arc;
// #[test]
// fn pull() {
// let pool = Arc::new(MemoryPool::<Vec<u8>>::new(3, || Vec::new()));
// let pool2 = pool.clone();
// let t1 = thread::spawn(move ||{
// let object1 = pool.lock().pull();
// //println!("retain 1");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object2 = pool.pull();
// //println!("retain 2");
// thread::sleep(std::time::Duration::from_secs(1));
//
// let object3 = pool.pull();
// //println!("retain 3");
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 1");
// drop(object1);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 2");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!("drop 3");
// drop(object3);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// let t2 = thread::spawn(move ||{
// //println!(">>>wait for 2.5s");
// thread::sleep(std::time::Duration::from_millis(2500));
// //println!(">>>try to retain 1.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 2.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
// //println!(">>>try to retain 3.....");
// let object2 = pool2.pull();
// //println!(">>>retained 1");
//
// thread::sleep(std::time::Duration::from_secs(1));
//
// //println!(">>>dropped");
// drop(object2);
// thread::sleep(std::time::Duration::from_secs(1));
//
// });
// t1.join();
// t2.join();
//
// }
#[test]
fn e2e() {
// let pool = MemoryPool::new(10, || Vec::new());
// let mut objects = Vec::new();
//
// thread::spawn(||{
// for i in 0..10 {
// let mut object = pool.pull();
// }
// });
//
//
//
// drop(objects);
//
//
// for i in 10..0 {
// let mut object = pool.objects.lock().pop().unwrap();
// assert_eq!(object.pop(), Some(i));
// }
}
} | //! let pool: MemoryPool<Vec<u8>> = MemoryPool::new(32, || Vec::with_capacity(4096));
//! let mut reusable_buff = pool.pull().unwrap(); // returns None when the pool is saturated
//! reusable_buff.clear(); // clear the buff before using | random_line_split |
counter.rs | use std::ffi::CString;
use std::io;
use std::sync::{Mutex, Once};
#[cfg(target_os = "freebsd")]
use libc::EDOOFUS;
#[cfg(target_os = "freebsd")]
use pmc_sys::{
pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC,
pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop,
};
#[cfg(not(target_os = "freebsd"))]
use super::stubs::*;
use crate::CPU_ANY;
use crate::{
error::{new_error, new_os_error, Error, ErrorKind},
signal,
};
static PMC_INIT: Once = Once::new();
lazy_static! {
static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42);
}
/// Configure event counter parameters.
///
/// Unless specified, a counter is allocated in counting mode with a system-wide
/// scope, recording events across all CPUs.
///
/// ```no_run
/// let config = CounterConfig::default().attach_to(vec![0]);
///
/// let instr = config.allocate("inst_retired.any")?;
/// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?;
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug, Default, Clone)]
pub struct CounterBuilder {
cpu: Option<i32>,
pids: Option<Vec<i32>>,
}
impl CounterBuilder {
/// Specify the CPU number that the PMC is to be allocated on.
///
/// Defaults to all CPUs ([`CPU_ANY`]).
pub fn set_cpu(self, cpu: i32) -> Self {
Self {
cpu: Some(cpu),
..self
}
}
/// Attach a counter to the specified PID(s).
///
/// When set, this causes the PMC to be allocated in process-scoped counting
/// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`).
///
/// # PID 0
///
/// PID 0 is a magic value, attaching to it causes the counter to be
/// attached to the current (caller's) PID.
pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self {
Self {
pids: Some(pids.into()),
..self
}
}
/// Allocate a PMC with the specified configuration, and attach to the
/// target PIDs (if any).
pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> {
Counter::new(event_spec, self.cpu, self.pids.clone())
}
}
#[derive(Debug)]
struct AttachHandle {
id: pmc_id_t,
pid: i32,
}
impl Drop for AttachHandle {
fn drop(&mut self) {
// BUG: do not attempt to detach from pid 0 or risk live-locking the
// machine.
//
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041
//
if self.pid != 0 {
unsafe { pmc_detach(self.id, self.pid) };
}
}
}
/// A handle to a running PMC counter.
///
/// Dropping this handle causes the counter to stop recording events.
pub struct Running<'a> {
counter: &'a mut Counter,
}
impl<'a> Running<'a> {
/// Read the current counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = counter.start()?;
///
/// println!("instructions: {}", handle.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
self.counter.read()
}
/// Set the value of the counter.
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
self.counter.set(value)
}
/// Stop the counter from recording new events.
pub fn stop(self) {
drop(self)
}
}
impl<'a> std::fmt::Display for Running<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.counter.fmt(f)
}
}
impl<'a> Drop for Running<'a> {
fn | (&mut self) {
unsafe { pmc_stop(self.counter.id) };
}
}
/// An allocated PMC counter.
///
/// Counters are initialised using the [`CounterBuilder`] type.
///
/// ```no_run
/// use std::{thread, time::Duration};
///
/// let instr = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = instr.start()?;
///
/// // Stop the counter after 5 seconds
/// thread::sleep(Duration::from_secs(5));
/// handle.stop();
///
/// println!("instructions: {}", instr.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug)]
pub struct Counter {
id: pmc_id_t,
attached: Option<Vec<AttachHandle>>,
}
impl Counter {
fn new(
event_spec: impl Into<String>,
cpu: Option<i32>,
pids: Option<Vec<i32>>,
) -> Result<Self, Error> {
// If there's any pids, request a process counter, otherwise a
// system-wide counter.
let pmc_mode = if pids.is_none() {
pmc_mode_PMC_MODE_SC
} else {
pmc_mode_PMC_MODE_TC
};
// It appears pmc_allocate isn't thread safe, so take a lock while
// calling it.
let _guard = BIG_FAT_LOCK.lock().unwrap();
init_pmc_once()?;
signal::check()?;
let c_spec =
CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?;
// Allocate the PMC
let mut id = 0;
if unsafe {
pmc_allocate(
c_spec.as_ptr(),
pmc_mode,
0,
cpu.unwrap_or(CPU_ANY),
&mut id,
0,
)
} != 0
{
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
// Initialise the counter so dropping it releases the PMC
let mut c = Counter { id, attached: None };
// Attach to pids, if any, and collect handles so dropping them later
// causes them to detach.
//
// The handles MUST be dropped before the Counter instance.
if let Some(pids) = pids {
let mut handles = vec![];
for pid in pids {
if unsafe { pmc_attach(id, pid) } != 0 {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EBUSY) => unreachable!(),
Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)),
Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)),
Some(libc::EINVAL) | Some(libc::ESRCH) => {
Err(new_os_error(ErrorKind::BadTarget))
}
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
handles.push(AttachHandle { id, pid })
}
c.attached = Some(handles)
}
Ok(c)
}
/// Start this counter.
///
/// The counter stops when the returned [`Running`] handle is dropped.
#[must_use = "counter only runs until handle is dropped"]
pub fn start(&mut self) -> Result<Running<'_>, Error> {
signal::check()?;
if unsafe { pmc_start(self.id) } != 0 {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(Running { counter: self })
}
/// Read the counter value.
///
/// This call is valid for both running, stopped, and unused counters.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.read()?;
/// let r2 = counter.read()?;
///
/// // A counter that is not running does not advance
/// assert!(r2 == r1);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
signal::check()?;
let mut value: u64 = 0;
if unsafe { pmc_read(self.id, &mut value) } != 0 {
return Err(new_os_error(ErrorKind::Unknown));
}
Ok(value)
}
/// Set an explicit counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.set(42)?;
/// // The previous value is returned when setting a new value
/// assert_eq!(r1, 0);
///
/// // Reading the counter returns the value set
/// let r2 = counter.read()?;
/// assert_eq!(r2, 42);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
signal::check()?;
let mut old: u64 = 0;
if unsafe { pmc_rw(self.id, value, &mut old) } != 0 {
let err = io::Error::last_os_error();
return match io::Error::raw_os_error(&err) {
Some(libc::EBUSY) => panic!("{}", err.to_string()),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(old)
}
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.read() {
Ok(v) => write!(f, "{}", v),
Err(e) => write!(f, "error: {}", e),
}
}
}
impl Drop for Counter {
fn drop(&mut self) {
let _guard = BIG_FAT_LOCK.lock().unwrap();
// The handles MUST be dropped before the Counter instance
self.attached = None;
unsafe {
pmc_release(self.id);
}
}
}
fn init_pmc_once() -> Result<(), Error> {
let mut maybe_err = Ok(());
PMC_INIT.call_once(|| {
if unsafe { pmc_init() } != 0 {
maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)),
Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
return;
}
// Register the signal handler
signal::watch_for(&[libc::SIGBUS, libc::SIGIO]);
});
maybe_err
}
| drop | identifier_name |
counter.rs | use std::ffi::CString;
use std::io;
use std::sync::{Mutex, Once};
#[cfg(target_os = "freebsd")]
use libc::EDOOFUS;
#[cfg(target_os = "freebsd")]
use pmc_sys::{
pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC,
pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop,
};
#[cfg(not(target_os = "freebsd"))]
use super::stubs::*;
use crate::CPU_ANY;
use crate::{
error::{new_error, new_os_error, Error, ErrorKind},
signal,
};
static PMC_INIT: Once = Once::new();
lazy_static! {
static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42);
}
/// Configure event counter parameters.
///
/// Unless specified, a counter is allocated in counting mode with a system-wide
/// scope, recording events across all CPUs.
///
/// ```no_run
/// let config = CounterConfig::default().attach_to(vec![0]);
///
/// let instr = config.allocate("inst_retired.any")?;
/// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?;
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug, Default, Clone)]
pub struct CounterBuilder {
cpu: Option<i32>,
pids: Option<Vec<i32>>,
}
impl CounterBuilder {
/// Specify the CPU number that the PMC is to be allocated on.
///
/// Defaults to all CPUs ([`CPU_ANY`]).
pub fn set_cpu(self, cpu: i32) -> Self {
Self {
cpu: Some(cpu),
..self
}
}
/// Attach a counter to the specified PID(s).
///
/// When set, this causes the PMC to be allocated in process-scoped counting
/// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`).
///
/// # PID 0
///
/// PID 0 is a magic value, attaching to it causes the counter to be
/// attached to the current (caller's) PID.
pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self {
Self {
pids: Some(pids.into()),
..self
}
}
/// Allocate a PMC with the specified configuration, and attach to the
/// target PIDs (if any).
pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> {
Counter::new(event_spec, self.cpu, self.pids.clone())
}
}
#[derive(Debug)]
struct AttachHandle {
id: pmc_id_t,
pid: i32,
}
impl Drop for AttachHandle {
fn drop(&mut self) {
// BUG: do not attempt to detach from pid 0 or risk live-locking the
// machine.
//
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041
//
if self.pid != 0 {
unsafe { pmc_detach(self.id, self.pid) };
}
}
}
/// A handle to a running PMC counter.
///
/// Dropping this handle causes the counter to stop recording events.
pub struct Running<'a> {
counter: &'a mut Counter,
}
impl<'a> Running<'a> {
/// Read the current counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = counter.start()?;
///
/// println!("instructions: {}", handle.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
self.counter.read()
}
/// Set the value of the counter.
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
self.counter.set(value)
}
/// Stop the counter from recording new events.
pub fn stop(self) {
drop(self)
}
}
impl<'a> std::fmt::Display for Running<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.counter.fmt(f)
}
}
impl<'a> Drop for Running<'a> {
fn drop(&mut self) {
unsafe { pmc_stop(self.counter.id) };
}
}
/// An allocated PMC counter.
///
/// Counters are initialised using the [`CounterBuilder`] type.
///
/// ```no_run
/// use std::{thread, time::Duration};
///
/// let instr = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = instr.start()?;
///
/// // Stop the counter after 5 seconds
/// thread::sleep(Duration::from_secs(5));
/// handle.stop();
///
/// println!("instructions: {}", instr.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug)]
pub struct Counter {
id: pmc_id_t,
attached: Option<Vec<AttachHandle>>,
}
impl Counter {
fn new(
event_spec: impl Into<String>,
cpu: Option<i32>,
pids: Option<Vec<i32>>,
) -> Result<Self, Error> {
// If there's any pids, request a process counter, otherwise a
// system-wide counter.
let pmc_mode = if pids.is_none() {
pmc_mode_PMC_MODE_SC
} else {
pmc_mode_PMC_MODE_TC
};
// It appears pmc_allocate isn't thread safe, so take a lock while
// calling it.
let _guard = BIG_FAT_LOCK.lock().unwrap();
init_pmc_once()?;
signal::check()?;
let c_spec =
CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?;
// Allocate the PMC
let mut id = 0;
if unsafe {
pmc_allocate(
c_spec.as_ptr(),
pmc_mode,
0,
cpu.unwrap_or(CPU_ANY),
&mut id,
0,
)
} != 0
{
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
// Initialise the counter so dropping it releases the PMC
let mut c = Counter { id, attached: None };
// Attach to pids, if any, and collect handles so dropping them later
// causes them to detach.
//
// The handles MUST be dropped before the Counter instance.
if let Some(pids) = pids {
let mut handles = vec![];
for pid in pids {
if unsafe { pmc_attach(id, pid) } != 0 |
handles.push(AttachHandle { id, pid })
}
c.attached = Some(handles)
}
Ok(c)
}
/// Start this counter.
///
/// The counter stops when the returned [`Running`] handle is dropped.
#[must_use = "counter only runs until handle is dropped"]
pub fn start(&mut self) -> Result<Running<'_>, Error> {
signal::check()?;
if unsafe { pmc_start(self.id) } != 0 {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(Running { counter: self })
}
/// Read the counter value.
///
/// This call is valid for both running, stopped, and unused counters.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.read()?;
/// let r2 = counter.read()?;
///
/// // A counter that is not running does not advance
/// assert!(r2 == r1);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
signal::check()?;
let mut value: u64 = 0;
if unsafe { pmc_read(self.id, &mut value) } != 0 {
return Err(new_os_error(ErrorKind::Unknown));
}
Ok(value)
}
/// Set an explicit counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.set(42)?;
/// // The previous value is returned when setting a new value
/// assert_eq!(r1, 0);
///
/// // Reading the counter returns the value set
/// let r2 = counter.read()?;
/// assert_eq!(r2, 42);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
signal::check()?;
let mut old: u64 = 0;
if unsafe { pmc_rw(self.id, value, &mut old) } != 0 {
let err = io::Error::last_os_error();
return match io::Error::raw_os_error(&err) {
Some(libc::EBUSY) => panic!("{}", err.to_string()),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(old)
}
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.read() {
Ok(v) => write!(f, "{}", v),
Err(e) => write!(f, "error: {}", e),
}
}
}
impl Drop for Counter {
fn drop(&mut self) {
let _guard = BIG_FAT_LOCK.lock().unwrap();
// The handles MUST be dropped before the Counter instance
self.attached = None;
unsafe {
pmc_release(self.id);
}
}
}
fn init_pmc_once() -> Result<(), Error> {
let mut maybe_err = Ok(());
PMC_INIT.call_once(|| {
if unsafe { pmc_init() } != 0 {
maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)),
Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
return;
}
// Register the signal handler
signal::watch_for(&[libc::SIGBUS, libc::SIGIO]);
});
maybe_err
}
| {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EBUSY) => unreachable!(),
Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)),
Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)),
Some(libc::EINVAL) | Some(libc::ESRCH) => {
Err(new_os_error(ErrorKind::BadTarget))
}
_ => Err(new_os_error(ErrorKind::Unknown)),
};
} | conditional_block |
counter.rs | use std::ffi::CString;
use std::io;
use std::sync::{Mutex, Once};
#[cfg(target_os = "freebsd")]
use libc::EDOOFUS;
#[cfg(target_os = "freebsd")]
use pmc_sys::{
pmc_allocate, pmc_attach, pmc_detach, pmc_id_t, pmc_init, pmc_mode_PMC_MODE_SC,
pmc_mode_PMC_MODE_TC, pmc_read, pmc_release, pmc_rw, pmc_start, pmc_stop,
};
#[cfg(not(target_os = "freebsd"))]
use super::stubs::*;
use crate::CPU_ANY;
use crate::{
error::{new_error, new_os_error, Error, ErrorKind},
signal,
};
static PMC_INIT: Once = Once::new();
lazy_static! {
static ref BIG_FAT_LOCK: Mutex<u32> = Mutex::new(42);
}
/// Configure event counter parameters.
///
/// Unless specified, a counter is allocated in counting mode with a system-wide
/// scope, recording events across all CPUs.
///
/// ```no_run
/// let config = CounterConfig::default().attach_to(vec![0]);
///
/// let instr = config.allocate("inst_retired.any")?;
/// let l1_hits = config.allocate("mem_load_uops_retired.l1_hit")?;
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug, Default, Clone)]
pub struct CounterBuilder {
cpu: Option<i32>,
pids: Option<Vec<i32>>,
}
impl CounterBuilder {
/// Specify the CPU number that the PMC is to be allocated on.
///
/// Defaults to all CPUs ([`CPU_ANY`]).
pub fn set_cpu(self, cpu: i32) -> Self {
Self {
cpu: Some(cpu),
..self
}
}
/// Attach a counter to the specified PID(s).
///
/// When set, this causes the PMC to be allocated in process-scoped counting
/// mode ([`pmc_mode_PMC_MODE_TC`] - see `man pmc`).
///
/// # PID 0
///
/// PID 0 is a magic value, attaching to it causes the counter to be
/// attached to the current (caller's) PID.
pub fn attach_to(self, pids: impl Into<Vec<i32>>) -> Self {
Self {
pids: Some(pids.into()),
..self
}
}
/// Allocate a PMC with the specified configuration, and attach to the
/// target PIDs (if any).
pub fn allocate(&self, event_spec: impl Into<String>) -> Result<Counter, Error> {
Counter::new(event_spec, self.cpu, self.pids.clone())
}
}
#[derive(Debug)]
struct AttachHandle {
id: pmc_id_t,
pid: i32,
}
impl Drop for AttachHandle {
fn drop(&mut self) {
// BUG: do not attempt to detach from pid 0 or risk live-locking the
// machine.
//
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=227041
//
if self.pid != 0 {
unsafe { pmc_detach(self.id, self.pid) };
}
}
}
/// A handle to a running PMC counter.
///
/// Dropping this handle causes the counter to stop recording events.
pub struct Running<'a> {
counter: &'a mut Counter,
}
impl<'a> Running<'a> {
/// Read the current counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = counter.start()?;
///
/// println!("instructions: {}", handle.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
self.counter.read()
}
/// Set the value of the counter.
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
self.counter.set(value)
}
/// Stop the counter from recording new events.
pub fn stop(self) {
drop(self)
}
}
impl<'a> std::fmt::Display for Running<'a> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.counter.fmt(f)
}
}
impl<'a> Drop for Running<'a> {
fn drop(&mut self) {
unsafe { pmc_stop(self.counter.id) };
}
}
/// An allocated PMC counter.
///
/// Counters are initialised using the [`CounterBuilder`] type.
///
/// ```no_run
/// use std::{thread, time::Duration};
///
/// let instr = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let handle = instr.start()?;
///
/// // Stop the counter after 5 seconds
/// thread::sleep(Duration::from_secs(5));
/// handle.stop();
///
/// println!("instructions: {}", instr.read()?);
/// #
/// # Ok::<(), Error>(())
/// ```
#[derive(Debug)]
pub struct Counter {
id: pmc_id_t,
attached: Option<Vec<AttachHandle>>,
}
impl Counter {
fn new(
event_spec: impl Into<String>,
cpu: Option<i32>,
pids: Option<Vec<i32>>,
) -> Result<Self, Error> {
// If there's any pids, request a process counter, otherwise a
// system-wide counter.
let pmc_mode = if pids.is_none() {
pmc_mode_PMC_MODE_SC
} else {
pmc_mode_PMC_MODE_TC
};
// It appears pmc_allocate isn't thread safe, so take a lock while
// calling it.
let _guard = BIG_FAT_LOCK.lock().unwrap();
init_pmc_once()?;
signal::check()?;
| // Allocate the PMC
let mut id = 0;
if unsafe {
pmc_allocate(
c_spec.as_ptr(),
pmc_mode,
0,
cpu.unwrap_or(CPU_ANY),
&mut id,
0,
)
} != 0
{
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EINVAL) => Err(new_os_error(ErrorKind::AllocInit)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
// Initialise the counter so dropping it releases the PMC
let mut c = Counter { id, attached: None };
// Attach to pids, if any, and collect handles so dropping them later
// causes them to detach.
//
// The handles MUST be dropped before the Counter instance.
if let Some(pids) = pids {
let mut handles = vec![];
for pid in pids {
if unsafe { pmc_attach(id, pid) } != 0 {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::EBUSY) => unreachable!(),
Some(libc::EEXIST) => Err(new_os_error(ErrorKind::AlreadyAttached)),
Some(libc::EPERM) => Err(new_os_error(ErrorKind::Forbidden)),
Some(libc::EINVAL) | Some(libc::ESRCH) => {
Err(new_os_error(ErrorKind::BadTarget))
}
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
handles.push(AttachHandle { id, pid })
}
c.attached = Some(handles)
}
Ok(c)
}
/// Start this counter.
///
/// The counter stops when the returned [`Running`] handle is dropped.
#[must_use = "counter only runs until handle is dropped"]
pub fn start(&mut self) -> Result<Running<'_>, Error> {
signal::check()?;
if unsafe { pmc_start(self.id) } != 0 {
return match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(EDOOFUS) => Err(new_os_error(ErrorKind::LogFileRequired)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::BadScope)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(Running { counter: self })
}
/// Read the counter value.
///
/// This call is valid for both running, stopped, and unused counters.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.read()?;
/// let r2 = counter.read()?;
///
/// // A counter that is not running does not advance
/// assert!(r2 == r1);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn read(&self) -> Result<u64, Error> {
signal::check()?;
let mut value: u64 = 0;
if unsafe { pmc_read(self.id, &mut value) } != 0 {
return Err(new_os_error(ErrorKind::Unknown));
}
Ok(value)
}
/// Set an explicit counter value.
///
/// ```no_run
/// let mut counter = CounterConfig::default()
/// .attach_to(vec![0])
/// .allocate("inst_retired.any")?;
///
/// let r1 = counter.set(42)?;
/// // The previous value is returned when setting a new value
/// assert_eq!(r1, 0);
///
/// // Reading the counter returns the value set
/// let r2 = counter.read()?;
/// assert_eq!(r2, 42);
/// #
/// # Ok::<(), Error>(())
/// ```
pub fn set(&mut self, value: u64) -> Result<u64, Error> {
signal::check()?;
let mut old: u64 = 0;
if unsafe { pmc_rw(self.id, value, &mut old) } != 0 {
let err = io::Error::last_os_error();
return match io::Error::raw_os_error(&err) {
Some(libc::EBUSY) => panic!("{}", err.to_string()),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
}
Ok(old)
}
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self.read() {
Ok(v) => write!(f, "{}", v),
Err(e) => write!(f, "error: {}", e),
}
}
}
impl Drop for Counter {
fn drop(&mut self) {
let _guard = BIG_FAT_LOCK.lock().unwrap();
// The handles MUST be dropped before the Counter instance
self.attached = None;
unsafe {
pmc_release(self.id);
}
}
}
fn init_pmc_once() -> Result<(), Error> {
let mut maybe_err = Ok(());
PMC_INIT.call_once(|| {
if unsafe { pmc_init() } != 0 {
maybe_err = match io::Error::raw_os_error(&io::Error::last_os_error()) {
Some(libc::ENOENT) => Err(new_os_error(ErrorKind::Init)),
Some(libc::ENXIO) => Err(new_os_error(ErrorKind::Unsupported)),
Some(libc::EPROGMISMATCH) => Err(new_os_error(ErrorKind::VersionMismatch)),
_ => Err(new_os_error(ErrorKind::Unknown)),
};
return;
}
// Register the signal handler
signal::watch_for(&[libc::SIGBUS, libc::SIGIO]);
});
maybe_err
} | let c_spec =
CString::new(event_spec.into()).map_err(|_| new_error(ErrorKind::InvalidEventSpec))?;
| random_line_split |
sync.js | /*
* image sync plugin (September 2, 2018)
* whenever an operation is performed on this image, sync the target images
*/
/*global JS9, $ */
"use strict";
JS9.Sync = {};
JS9.Sync.CLASS = "JS9"; // class of plugins (1st part of div class)
JS9.Sync.NAME = "Sync"; // name of this plugin (2nd part of div class)
// process ops input to [un]sync
// called in image context
JS9.Sync.getOps = function(ops){
let i, j, op;
const xops = [];
// default from above ...
ops = ops || JS9.globalOpts.syncOps;
if( !$.isArray(ops) ){
try{ ops = JSON.parse(ops); }
catch(e){ ops = [ops]; }
}
for(i=0, j=0; i<ops.length; i++){
op = ops[i];
switch(op){
case "wcs":
// wcs is actually two operations
xops[j++] = "wcssys";
xops[j++] = "wcunits";
break;
default:
xops[j++] = op;
break;
}
}
return xops;
};
// process ims input to [un]sync
// called in image context
JS9.Sync.getIms = function(ims){
let i, j, xim;
const xims = [];
ims = ims || JS9.images;
if( !$.isArray(ims) ){
try{ ims = JSON.parse(ims); }
catch(e){ ims = [ims]; }
}
for(i=0, j=0; i<ims.length; i++){
// if image ids were passed, look up corresponding image objects
if( typeof ims[i] === "string" ){
xim = JS9.lookupImage(ims[i]);
} else {
xim = ims[i];
}
// exclude the originating image
if( xim &&
(xim.id !== this.id || (xim.display.id !== this.display.id)) ){
xims[j++] = xim;
}
}
return xims;
};
// sync image(s) when operations are performed on an originating image
// called in the image context
JS9.Sync.sync = function(...args){
let i, j, xop, xim, xops, xims, xlen;
let [ops, ims, opts] = args;
const arr = [];
// make sure sink object exists
this.syncs = this.syncs || {active: true};
// opts is optional
opts = opts || {reciprocate: JS9.globalOpts.syncReciprocate};
if( typeof opts === "string" ){
try{ opts = JSON.parse(opts); }
catch(e){ JS9.error(`can't parse sync opts: ${opts}`, e); }
}
// 1 boolean arg: turn on/off sync'ing
if( args.length === 1 && typeof ops === "boolean" ){
this.syncs.active = ops;
return;
}
// get regularized args
xops = JS9.Sync.getOps.call(this, ops);
xims = JS9.Sync.getIms.call(this, ims);
xlen = xims.length;
// reverse current image and target images?
if( opts.reverse ) |
// for each op (colormap, pan, etc.)
for(i=0; i<xops.length; i++){
// current op
xop = xops[i];
this.syncs[xop] = this.syncs[xop] || [];
ims = this.syncs[xop];
// add images not already in the list
for(j=0; j<xlen; j++){
xim = xims[j];
if( $.inArray(xim, ims) < 0 ){
// add to list
ims.push(xim);
// we'll sync each new target image
arr.push({im: this, xim: xim, xop: xop, xarg: null});
}
}
}
// reciprocal sync'ing between all images?
if( opts.reciprocate ){
JS9.Sync.reciprocating = true;
opts.reciprocate = false;
for(i=0, xim=this; i<xlen; i++){
xims.push(xim);
xim = xims.shift();
JS9.Sync.sync.call(xim, xops, xims, opts);
}
delete JS9.Sync.reciprocating;
}
// use wcs for syncing
if( JS9.notNull(opts.syncwcs) ){
this.tmp.syncwcs = opts.syncwcs;
} else {
this.tmp.syncwcs = JS9.globalOpts.syncWCS;
}
// sync target image, if necessary
if( !JS9.Sync.reciprocating ){
// sync the target images
JS9.Sync.xeqSync.call(this, arr);
// flag we are ready to sync on user events
JS9.Sync.ready = true;
}
};
// unsync one or more images
// called in the image context
JS9.Sync.unsync = function(ops, ims, opts){
let i, op, tims, xops, xims, xlen, xim;
// sanity check
if( !this.syncs ){
return;
}
// opts is optional
opts = opts || {reciprocate: JS9.globalOpts.syncReciprocate};
// get regularized args
xops = JS9.Sync.getOps.call(this, ops);
xims = JS9.Sync.getIms.call(this, ims);
xlen = xims.length;
// reverse current image and target images?
if( opts.reverse ){
delete opts.reverse;
for(i=0; i<xlen; i++){
JS9.Sync.unsync.call(xims[i], xops, [this]);
}
return;
}
// for each op in this image ...
for( op of Object.keys(this.syncs) ){
// skip this op if its not in the specified op list
if( xops && $.inArray(op, xops) < 0 ){
continue;
}
// if no target images specified, delete the whole thing
if( !xims ){
delete this.syncs[op];
} else {
// get target image array for this image
tims = this.syncs[op];
// for each target image ...
for(i=tims.length-1; i>=0; i--){
// remove if it was specified for removal
if( $.inArray(tims[i], xims) >= 0 ){
tims.splice(i, 1);
}
}
// remove empty target image array
if( !tims.length ){
delete this.syncs[op];
}
}
}
// remove empty sink object from image
if( !Object.keys(this.syncs).length ){
delete this.syncs;
}
// reciprocal sync'ing between all images?
if( opts.reciprocate ){
JS9.Sync.reciprocating = true;
opts.reciprocate = false;
for(i=0, xim=this; i<xlen; i++){
xims.push(xim);
xim = xims.shift();
JS9.Sync.unsync.call(xim, xops, xims, opts);
}
delete JS9.Sync.reciprocating;
}
};
// perform a sync action on target images using params from originating image
// called in image context
JS9.Sync.xeqSync = function(arr){
let i, j, k, obj, pos, wcscen, xim, xarr, xobj, xdata, key, diff;
let mydata, myobj, myid, rarr, rstr, args, nflip;
let displays = {};
const oval = JS9.globalOpts.xeqPlugins;
const thisid = `${this.id}_${this.display.id}`;
const regmatch = (r1, r2) => {
// check for a target region with the same syncid as the current region
if( !r1.data || !r1.data.syncid ){ return false; }
if( !r2.data || !r2.data.syncid ){ return false; }
return r1.data.syncid === r2.data.syncid;
};
const calcFlip = (flip) => {
let i, arr;
let nx = 0;
let ny = 0;
let nflip = "";
arr = flip.split("");
for(i=0; i<arr.length; i++){
switch(arr[i]){
case "x":
nx++;
break;
case "y":
ny++;
break;
}
}
if( nx % 2 === 1 ){ nflip += "x"; }
if( ny % 2 === 1 ){ nflip += "y"; }
return nflip || "";
}
// don't recurse!
if( this.tmp.syncRunning ){ return; }
this.tmp.syncRunning = true;
// sync all target images with this operation (but swallow errors)
try{
// do regions first to avoid problems with changes to the current image
for(i=0; i<arr.length; i++){
obj = arr[i];
if( obj.xop === "regions" ){
arr.splice(i, 1);
arr.unshift(obj);
}
}
// process all operations
for(i=0; i<arr.length; i++){
obj = arr[i];
xim = obj.xim;
// don't recurse on target image
if( xim.syncs ){
if( xim.tmp.syncRunning ){ continue; }
xim.tmp.syncRunning = true;
// if image is not displayed, we'll need to redisplay original
if( xim !== xim.display.image ){
if( !displays[xim.display.id] ){
displays[xim.display.id] = xim.display.image;
}
}
}
try{
switch(obj.xop){
case "alignment":
xim.alignPanZoom(this);
break;
case "colormap":
xim.setColormap(this.params.colormap);
break;
case "contrastbias":
xim.setColormap(this.params.contrast, this.params.bias);
break;
case "flip":
if( this.params.flip != xim.params.flip ){
nflip = calcFlip(this.params.flip + xim.params.flip);
xim.setFlip(nflip);
}
break;
case "pan":
pos = this.getPan();
if( this.tmp.syncwcs && this.validWCS() ){
wcscen = JS9.pix2wcs(this.raw.wcs, pos.ox, pos.oy);
xim.setPan({wcs: wcscen});
} else {
xim.setPan(pos.ox, pos.oy);
}
break;
case "regions":
// reset args
args = [];
xarr = null;
if( obj.xarg ){
// region object of the current region
args.push(obj.xarg);
} else {
// Try to sync all regions in the current image to
// regions in the target. We will add regions which do
// not exist in the target, and update those which do.
if( !rarr ){
// get current regions, if necessary
rarr = this.getShapes("regions", "all");
}
// get regions in the target
xarr = xim.getShapes("regions", "all");
// sync all current regions to the target,
// either adding or updating
for(j=0; j<rarr.length; j++){
// assume we will create a new region
rarr[j].mode = "add";
// look through the target regions
for(k=0; k<xarr.length; k++){
// if target matches the current region ...
if( regmatch(xarr[k], rarr[j]) ){
// update it as an existing region
rarr[j].mode = "update";
break;
}
}
// we'll either add or update this region
args.push(rarr[j]);
}
}
// process all regions ...
for(j=0; j<args.length; j++){
// get a copy of the regions object so we can change it
myobj = $.extend(true, {}, args[j]);
// get a sync id
if( myobj.data && myobj.data.syncid ){
// reuse its syncid, if possible
myid = myobj.data.syncid;
} else {
// otherwise, make up our own syncid
myid = `${thisid}_${myobj.id}`;
}
// process the action for this region ...
switch(myobj.mode){
case "add":
// data object with syncid
mydata = {doexport: false, syncid: myid};
// add the syncid to the new region in this display
JS9.globalOpts.xeqPlugins = false;
this.changeShapes("regions",
myobj.id, {data: mydata});
JS9.globalOpts.xeqPlugins = oval;
// get the region object for this region
rstr = this.listRegions(myobj.id, {mode: 1});
// use it to add this region to the target
xim.addShapes("regions", rstr, {data: mydata});
break;
case "remove":
// get all regions in the target
if( !xarr ){
xarr = xim.getShapes("regions", "all");
}
for(k=0; k<xarr.length; k++){
xobj = xarr[k];
xdata = xobj.data;
// skip unsync'ed regions
if( !xdata || !xdata.syncid ){ continue; }
// if this region is sync'ed remove it
if( xdata.syncid === myid ){
// remove region from the target
xim.removeShapes("regions", myid);
}
}
break;
case "move":
case "update":
// account for difference in image scales, angles
// no scale factor
delete myobj.sizeScale;
if( this.raw.wcsinfo && xim.raw.wcsinfo ){
// scale factor
if( xim.raw.wcsinfo.cdelt1 ){
myobj.sizeScale =
this.raw.wcsinfo.cdelt1 / xim.raw.wcsinfo.cdelt1;
}
// angle for shapes accepting angles
if( xim.raw.wcsinfo.crot ){
if( myobj.shape === "box" ||
myobj.shape === "ellipse" ||
(myobj.shape === "text" &&
!myobj.parent) ){
myobj.angle += xim.raw.wcsinfo.crot;
}
}
}
// get target regions, if necessary
if( !xarr ){
xarr = xim.getShapes("regions", "all");
}
for(k=0; k<xarr.length; k++){
xobj = xarr[k];
xdata = xobj.data;
if( !xdata || !xdata.syncid ){ continue; }
if( xdata.syncid === myid ){
// apply changes to target region
xim.changeShapes("regions", myid, myobj);
}
}
break;
}
}
break;
case "rot90":
if( this.params.rot90 != xim.params.rot90 ){
diff = (this.params.rot90 - xim.params.rot90) || 0;
switch(diff){
case 90:
case -270:
xim.setRot90(90);
break;
case -90:
case 270:
xim.setRot90(-90);
break;
default:
break;
}
}
break;
case "rotate":
if( this.params.rotate != xim.params.rotate ){
diff = (this.params.rotate - xim.params.rotate) || 0;
xim.setRotate(diff);
}
break;
case "scale":
xim.setScale(this.params.scale);
break;
case "wcssys":
xim.setWCSSys(this.params.wcssys);
break;
case "wcsunits":
xim.setWCSUnits(this.params.wcsunits);
break;
case "zoom":
xim.setZoom(this.params.zoom);
break;
}
}
catch(e){ /* empty */ }
finally{
// done sync'ing
delete xim.tmp.syncRunning;
}
}
// revert to display of orginal image where necessary
for( key of Object.keys(displays) ){
displays[key].displayImage();
}
}
catch(e){ /* empty */ }
finally{
this.displayImage();
delete this.tmp.syncRunning;
}
};
// sync images, if necessary
// inner routine called by JS9.xeqPlugins callbacks
// called in image context
JS9.Sync.maybeSync = function(ops, arg){
let i, j, ims, op, arr;
// sanity check
if( !JS9.Sync.ready || !this.syncs || this.tmp.syncRunning ){
return;
}
if( !$.isArray(ops) ){
ops = [ops];
}
// do we need to sync images for this operation?
if( this.syncs.active ){
for(j=0; j<ops.length; j++){
op = ops[j];
if( $.isArray(this.syncs[op]) && this.syncs[op].length ){
// setup sync of all target images
ims = this.syncs[op];
arr = [];
for(i=0; i<ims.length; i++){
arr.push({xim: ims[i], xop: op, xarg: arg});
}
// sync target images
JS9.Sync.xeqSync.call(this, arr);
}
}
}
};
// called when plugin is intialized on a display
JS9.Sync.init = function(){
return this;
};
// callbacks which can be synchronized:
// onsetcolormap
JS9.Sync.setcolormap = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "colormap");
};
// onchangecontrastbias
JS9.Sync.changecontrastbias = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "contrastbias");
};
// onsetflip
JS9.Sync.setflip = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["flip","alignment"]);
};
// onsetpan
JS9.Sync.setpan = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["pan","alignment"]);
};
// onsetrot90
JS9.Sync.setrot90 = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["rot90","alignment"]);
};
// onsetrotate
JS9.Sync.setrotate = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["rotate","alignment"]);
};
// onregionschange
JS9.Sync.regionschange = function(im, xreg){
if( !im ){ return; }
switch(xreg.mode){
case "select":
case "unselect":
break;
default:
JS9.Sync.maybeSync.call(im, "regions", xreg);
break;
}
};
// onsetscale
JS9.Sync.setscale = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "scale");
};
// onsetwssys
JS9.Sync.setwcssys = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "wcssys");
};
// onsetwcsunits
JS9.Sync.setwcsunits = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "wcsunits");
};
// onsetzoom
JS9.Sync.setzoom = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["zoom","alignment"]);
};
// re-init other syncUI interfaces when an image is loaded here
JS9.Sync.loadimage = function(im){
let i, pinst;
if( !im ){ return; }
for(i=0; i<JS9.displays.length; i++){
pinst = JS9.displays[i].pluginInstances.JS9SyncUI;
if( pinst && pinst.isActive() ){
JS9.SyncUI.init.call(pinst);
}
}
};
// clean up an image when its closed
// re-init other syncUI interfaces when an image is closed here
JS9.Sync.closeimage = function(im){
let i, pinst;
if( !im ){ return; }
// remove this image from all other image sync lists
for(i=0; i<JS9.images.length; i++){
JS9.Sync.unsync.call(JS9.images[i], null, [im]);
}
for(i=0; i<JS9.displays.length; i++){
pinst = JS9.displays[i].pluginInstances.JS9SyncUI;
if( pinst && pinst.isActive() ){
JS9.SyncUI.init.call(pinst);
}
}
};
// add to image prototype and create public API
JS9.Image.prototype.syncImages = JS9.Sync.sync;
JS9.mkPublic("SyncImages", "syncImages");
JS9.Image.prototype.unsyncImages = JS9.Sync.unsync;
JS9.mkPublic("UnsyncImages", "unsyncImages");
// register the plugin
JS9.RegisterPlugin(JS9.Sync.CLASS, JS9.Sync.NAME, JS9.Sync.init,
{onsetcolormap: JS9.Sync.setcolormap,
onsetflip: JS9.Sync.setflip,
onsetpan: JS9.Sync.setpan,
onregionschange: JS9.Sync.regionschange,
onsetrot90: JS9.Sync.setrot90,
onsetrotate: JS9.Sync.setrotate,
onsetscale: JS9.Sync.setscale,
onsetwcssys: JS9.Sync.setwcssys,
onsetwcsunits: JS9.Sync.setwcsunits,
onsetzoom: JS9.Sync.setzoom,
onchangecontrastbias: JS9.Sync.changecontrastbias,
onimageload: JS9.Sync.loadimage,
onimageclose: JS9.Sync.closeimage,
winDims: [0, 0]});
| {
delete opts.reverse;
for(i=0; i<xlen; i++){
JS9.Sync.sync.call(xims[i], xops, [this]);
}
return;
} | conditional_block |
sync.js | /*
* image sync plugin (September 2, 2018)
* whenever an operation is performed on this image, sync the target images
*/
/*global JS9, $ */
"use strict";
JS9.Sync = {};
JS9.Sync.CLASS = "JS9"; // class of plugins (1st part of div class)
JS9.Sync.NAME = "Sync"; // name of this plugin (2nd part of div class)
// process ops input to [un]sync
// called in image context
JS9.Sync.getOps = function(ops){
let i, j, op;
const xops = [];
// default from above ...
ops = ops || JS9.globalOpts.syncOps;
if( !$.isArray(ops) ){
try{ ops = JSON.parse(ops); }
catch(e){ ops = [ops]; }
}
for(i=0, j=0; i<ops.length; i++){
op = ops[i];
switch(op){
case "wcs":
// wcs is actually two operations
xops[j++] = "wcssys";
xops[j++] = "wcunits";
break;
default:
xops[j++] = op;
break;
}
}
return xops;
};
// process ims input to [un]sync
// called in image context
JS9.Sync.getIms = function(ims){
let i, j, xim;
const xims = [];
ims = ims || JS9.images;
if( !$.isArray(ims) ){
try{ ims = JSON.parse(ims); }
catch(e){ ims = [ims]; }
}
for(i=0, j=0; i<ims.length; i++){
// if image ids were passed, look up corresponding image objects
if( typeof ims[i] === "string" ){
xim = JS9.lookupImage(ims[i]); | if( xim &&
(xim.id !== this.id || (xim.display.id !== this.display.id)) ){
xims[j++] = xim;
}
}
return xims;
};
// sync image(s) when operations are performed on an originating image
// called in the image context
JS9.Sync.sync = function(...args){
let i, j, xop, xim, xops, xims, xlen;
let [ops, ims, opts] = args;
const arr = [];
// make sure sink object exists
this.syncs = this.syncs || {active: true};
// opts is optional
opts = opts || {reciprocate: JS9.globalOpts.syncReciprocate};
if( typeof opts === "string" ){
try{ opts = JSON.parse(opts); }
catch(e){ JS9.error(`can't parse sync opts: ${opts}`, e); }
}
// 1 boolean arg: turn on/off sync'ing
if( args.length === 1 && typeof ops === "boolean" ){
this.syncs.active = ops;
return;
}
// get regularized args
xops = JS9.Sync.getOps.call(this, ops);
xims = JS9.Sync.getIms.call(this, ims);
xlen = xims.length;
// reverse current image and target images?
if( opts.reverse ){
delete opts.reverse;
for(i=0; i<xlen; i++){
JS9.Sync.sync.call(xims[i], xops, [this]);
}
return;
}
// for each op (colormap, pan, etc.)
for(i=0; i<xops.length; i++){
// current op
xop = xops[i];
this.syncs[xop] = this.syncs[xop] || [];
ims = this.syncs[xop];
// add images not already in the list
for(j=0; j<xlen; j++){
xim = xims[j];
if( $.inArray(xim, ims) < 0 ){
// add to list
ims.push(xim);
// we'll sync each new target image
arr.push({im: this, xim: xim, xop: xop, xarg: null});
}
}
}
// reciprocal sync'ing between all images?
if( opts.reciprocate ){
JS9.Sync.reciprocating = true;
opts.reciprocate = false;
for(i=0, xim=this; i<xlen; i++){
xims.push(xim);
xim = xims.shift();
JS9.Sync.sync.call(xim, xops, xims, opts);
}
delete JS9.Sync.reciprocating;
}
// use wcs for syncing
if( JS9.notNull(opts.syncwcs) ){
this.tmp.syncwcs = opts.syncwcs;
} else {
this.tmp.syncwcs = JS9.globalOpts.syncWCS;
}
// sync target image, if necessary
if( !JS9.Sync.reciprocating ){
// sync the target images
JS9.Sync.xeqSync.call(this, arr);
// flag we are ready to sync on user events
JS9.Sync.ready = true;
}
};
// unsync one or more images
// called in the image context
JS9.Sync.unsync = function(ops, ims, opts){
let i, op, tims, xops, xims, xlen, xim;
// sanity check
if( !this.syncs ){
return;
}
// opts is optional
opts = opts || {reciprocate: JS9.globalOpts.syncReciprocate};
// get regularized args
xops = JS9.Sync.getOps.call(this, ops);
xims = JS9.Sync.getIms.call(this, ims);
xlen = xims.length;
// reverse current image and target images?
if( opts.reverse ){
delete opts.reverse;
for(i=0; i<xlen; i++){
JS9.Sync.unsync.call(xims[i], xops, [this]);
}
return;
}
// for each op in this image ...
for( op of Object.keys(this.syncs) ){
// skip this op if its not in the specified op list
if( xops && $.inArray(op, xops) < 0 ){
continue;
}
// if no target images specified, delete the whole thing
if( !xims ){
delete this.syncs[op];
} else {
// get target image array for this image
tims = this.syncs[op];
// for each target image ...
for(i=tims.length-1; i>=0; i--){
// remove if it was specified for removal
if( $.inArray(tims[i], xims) >= 0 ){
tims.splice(i, 1);
}
}
// remove empty target image array
if( !tims.length ){
delete this.syncs[op];
}
}
}
// remove empty sink object from image
if( !Object.keys(this.syncs).length ){
delete this.syncs;
}
// reciprocal sync'ing between all images?
if( opts.reciprocate ){
JS9.Sync.reciprocating = true;
opts.reciprocate = false;
for(i=0, xim=this; i<xlen; i++){
xims.push(xim);
xim = xims.shift();
JS9.Sync.unsync.call(xim, xops, xims, opts);
}
delete JS9.Sync.reciprocating;
}
};
// perform a sync action on target images using params from originating image
// called in image context
JS9.Sync.xeqSync = function(arr){
let i, j, k, obj, pos, wcscen, xim, xarr, xobj, xdata, key, diff;
let mydata, myobj, myid, rarr, rstr, args, nflip;
let displays = {};
const oval = JS9.globalOpts.xeqPlugins;
const thisid = `${this.id}_${this.display.id}`;
const regmatch = (r1, r2) => {
// check for a target region with the same syncid as the current region
if( !r1.data || !r1.data.syncid ){ return false; }
if( !r2.data || !r2.data.syncid ){ return false; }
return r1.data.syncid === r2.data.syncid;
};
const calcFlip = (flip) => {
let i, arr;
let nx = 0;
let ny = 0;
let nflip = "";
arr = flip.split("");
for(i=0; i<arr.length; i++){
switch(arr[i]){
case "x":
nx++;
break;
case "y":
ny++;
break;
}
}
if( nx % 2 === 1 ){ nflip += "x"; }
if( ny % 2 === 1 ){ nflip += "y"; }
return nflip || "";
}
// don't recurse!
if( this.tmp.syncRunning ){ return; }
this.tmp.syncRunning = true;
// sync all target images with this operation (but swallow errors)
try{
// do regions first to avoid problems with changes to the current image
for(i=0; i<arr.length; i++){
obj = arr[i];
if( obj.xop === "regions" ){
arr.splice(i, 1);
arr.unshift(obj);
}
}
// process all operations
for(i=0; i<arr.length; i++){
obj = arr[i];
xim = obj.xim;
// don't recurse on target image
if( xim.syncs ){
if( xim.tmp.syncRunning ){ continue; }
xim.tmp.syncRunning = true;
// if image is not displayed, we'll need to redisplay original
if( xim !== xim.display.image ){
if( !displays[xim.display.id] ){
displays[xim.display.id] = xim.display.image;
}
}
}
try{
switch(obj.xop){
case "alignment":
xim.alignPanZoom(this);
break;
case "colormap":
xim.setColormap(this.params.colormap);
break;
case "contrastbias":
xim.setColormap(this.params.contrast, this.params.bias);
break;
case "flip":
if( this.params.flip != xim.params.flip ){
nflip = calcFlip(this.params.flip + xim.params.flip);
xim.setFlip(nflip);
}
break;
case "pan":
pos = this.getPan();
if( this.tmp.syncwcs && this.validWCS() ){
wcscen = JS9.pix2wcs(this.raw.wcs, pos.ox, pos.oy);
xim.setPan({wcs: wcscen});
} else {
xim.setPan(pos.ox, pos.oy);
}
break;
case "regions":
// reset args
args = [];
xarr = null;
if( obj.xarg ){
// region object of the current region
args.push(obj.xarg);
} else {
// Try to sync all regions in the current image to
// regions in the target. We will add regions which do
// not exist in the target, and update those which do.
if( !rarr ){
// get current regions, if necessary
rarr = this.getShapes("regions", "all");
}
// get regions in the target
xarr = xim.getShapes("regions", "all");
// sync all current regions to the target,
// either adding or updating
for(j=0; j<rarr.length; j++){
// assume we will create a new region
rarr[j].mode = "add";
// look through the target regions
for(k=0; k<xarr.length; k++){
// if target matches the current region ...
if( regmatch(xarr[k], rarr[j]) ){
// update it as an existing region
rarr[j].mode = "update";
break;
}
}
// we'll either add or update this region
args.push(rarr[j]);
}
}
// process all regions ...
for(j=0; j<args.length; j++){
// get a copy of the regions object so we can change it
myobj = $.extend(true, {}, args[j]);
// get a sync id
if( myobj.data && myobj.data.syncid ){
// reuse its syncid, if possible
myid = myobj.data.syncid;
} else {
// otherwise, make up our own syncid
myid = `${thisid}_${myobj.id}`;
}
// process the action for this region ...
switch(myobj.mode){
case "add":
// data object with syncid
mydata = {doexport: false, syncid: myid};
// add the syncid to the new region in this display
JS9.globalOpts.xeqPlugins = false;
this.changeShapes("regions",
myobj.id, {data: mydata});
JS9.globalOpts.xeqPlugins = oval;
// get the region object for this region
rstr = this.listRegions(myobj.id, {mode: 1});
// use it to add this region to the target
xim.addShapes("regions", rstr, {data: mydata});
break;
case "remove":
// get all regions in the target
if( !xarr ){
xarr = xim.getShapes("regions", "all");
}
for(k=0; k<xarr.length; k++){
xobj = xarr[k];
xdata = xobj.data;
// skip unsync'ed regions
if( !xdata || !xdata.syncid ){ continue; }
// if this region is sync'ed remove it
if( xdata.syncid === myid ){
// remove region from the target
xim.removeShapes("regions", myid);
}
}
break;
case "move":
case "update":
// account for difference in image scales, angles
// no scale factor
delete myobj.sizeScale;
if( this.raw.wcsinfo && xim.raw.wcsinfo ){
// scale factor
if( xim.raw.wcsinfo.cdelt1 ){
myobj.sizeScale =
this.raw.wcsinfo.cdelt1 / xim.raw.wcsinfo.cdelt1;
}
// angle for shapes accepting angles
if( xim.raw.wcsinfo.crot ){
if( myobj.shape === "box" ||
myobj.shape === "ellipse" ||
(myobj.shape === "text" &&
!myobj.parent) ){
myobj.angle += xim.raw.wcsinfo.crot;
}
}
}
// get target regions, if necessary
if( !xarr ){
xarr = xim.getShapes("regions", "all");
}
for(k=0; k<xarr.length; k++){
xobj = xarr[k];
xdata = xobj.data;
if( !xdata || !xdata.syncid ){ continue; }
if( xdata.syncid === myid ){
// apply changes to target region
xim.changeShapes("regions", myid, myobj);
}
}
break;
}
}
break;
case "rot90":
if( this.params.rot90 != xim.params.rot90 ){
diff = (this.params.rot90 - xim.params.rot90) || 0;
switch(diff){
case 90:
case -270:
xim.setRot90(90);
break;
case -90:
case 270:
xim.setRot90(-90);
break;
default:
break;
}
}
break;
case "rotate":
if( this.params.rotate != xim.params.rotate ){
diff = (this.params.rotate - xim.params.rotate) || 0;
xim.setRotate(diff);
}
break;
case "scale":
xim.setScale(this.params.scale);
break;
case "wcssys":
xim.setWCSSys(this.params.wcssys);
break;
case "wcsunits":
xim.setWCSUnits(this.params.wcsunits);
break;
case "zoom":
xim.setZoom(this.params.zoom);
break;
}
}
catch(e){ /* empty */ }
finally{
// done sync'ing
delete xim.tmp.syncRunning;
}
}
// revert to display of orginal image where necessary
for( key of Object.keys(displays) ){
displays[key].displayImage();
}
}
catch(e){ /* empty */ }
finally{
this.displayImage();
delete this.tmp.syncRunning;
}
};
// sync images, if necessary
// inner routine called by JS9.xeqPlugins callbacks
// called in image context
JS9.Sync.maybeSync = function(ops, arg){
let i, j, ims, op, arr;
// sanity check
if( !JS9.Sync.ready || !this.syncs || this.tmp.syncRunning ){
return;
}
if( !$.isArray(ops) ){
ops = [ops];
}
// do we need to sync images for this operation?
if( this.syncs.active ){
for(j=0; j<ops.length; j++){
op = ops[j];
if( $.isArray(this.syncs[op]) && this.syncs[op].length ){
// setup sync of all target images
ims = this.syncs[op];
arr = [];
for(i=0; i<ims.length; i++){
arr.push({xim: ims[i], xop: op, xarg: arg});
}
// sync target images
JS9.Sync.xeqSync.call(this, arr);
}
}
}
};
// called when plugin is intialized on a display
JS9.Sync.init = function(){
return this;
};
// callbacks which can be synchronized:
// onsetcolormap
JS9.Sync.setcolormap = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "colormap");
};
// onchangecontrastbias
JS9.Sync.changecontrastbias = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "contrastbias");
};
// onsetflip
JS9.Sync.setflip = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["flip","alignment"]);
};
// onsetpan
JS9.Sync.setpan = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["pan","alignment"]);
};
// onsetrot90
JS9.Sync.setrot90 = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["rot90","alignment"]);
};
// onsetrotate
JS9.Sync.setrotate = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["rotate","alignment"]);
};
// onregionschange
JS9.Sync.regionschange = function(im, xreg){
if( !im ){ return; }
switch(xreg.mode){
case "select":
case "unselect":
break;
default:
JS9.Sync.maybeSync.call(im, "regions", xreg);
break;
}
};
// onsetscale
JS9.Sync.setscale = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "scale");
};
// onsetwssys
JS9.Sync.setwcssys = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "wcssys");
};
// onsetwcsunits
JS9.Sync.setwcsunits = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, "wcsunits");
};
// onsetzoom
JS9.Sync.setzoom = function(im){
if( !im ){ return; }
JS9.Sync.maybeSync.call(im, ["zoom","alignment"]);
};
// re-init other syncUI interfaces when an image is loaded here
JS9.Sync.loadimage = function(im){
let i, pinst;
if( !im ){ return; }
for(i=0; i<JS9.displays.length; i++){
pinst = JS9.displays[i].pluginInstances.JS9SyncUI;
if( pinst && pinst.isActive() ){
JS9.SyncUI.init.call(pinst);
}
}
};
// clean up an image when its closed
// re-init other syncUI interfaces when an image is closed here
JS9.Sync.closeimage = function(im){
let i, pinst;
if( !im ){ return; }
// remove this image from all other image sync lists
for(i=0; i<JS9.images.length; i++){
JS9.Sync.unsync.call(JS9.images[i], null, [im]);
}
for(i=0; i<JS9.displays.length; i++){
pinst = JS9.displays[i].pluginInstances.JS9SyncUI;
if( pinst && pinst.isActive() ){
JS9.SyncUI.init.call(pinst);
}
}
};
// add to image prototype and create public API
JS9.Image.prototype.syncImages = JS9.Sync.sync;
JS9.mkPublic("SyncImages", "syncImages");
JS9.Image.prototype.unsyncImages = JS9.Sync.unsync;
JS9.mkPublic("UnsyncImages", "unsyncImages");
// register the plugin
JS9.RegisterPlugin(JS9.Sync.CLASS, JS9.Sync.NAME, JS9.Sync.init,
{onsetcolormap: JS9.Sync.setcolormap,
onsetflip: JS9.Sync.setflip,
onsetpan: JS9.Sync.setpan,
onregionschange: JS9.Sync.regionschange,
onsetrot90: JS9.Sync.setrot90,
onsetrotate: JS9.Sync.setrotate,
onsetscale: JS9.Sync.setscale,
onsetwcssys: JS9.Sync.setwcssys,
onsetwcsunits: JS9.Sync.setwcsunits,
onsetzoom: JS9.Sync.setzoom,
onchangecontrastbias: JS9.Sync.changecontrastbias,
onimageload: JS9.Sync.loadimage,
onimageclose: JS9.Sync.closeimage,
winDims: [0, 0]}); | } else {
xim = ims[i];
}
// exclude the originating image | random_line_split |
model_sql.go | package mc
import (
"fmt"
"github.com/spf13/cast"
"gorm.io/gorm"
"reflect"
"strings"
)
//kvs查询选项
type KvsQueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
KvName string //kv配置项名
ExtraWhere []interface{} //额外附加的查询条件
ReturnPath bool //当模型为树型结构时,返回的key是否使用path代替
ExtraFields []string //额外附加的查询字段
Order string //排序
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
}
//数据查询选项
type QueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
ExtraWhere []interface{} //附加的查询条件
Values map[string]interface{} //查询项的值
ExtraFields []string //额外附加的查询字段
Order string //排序
Page int //查询页码(仅对find有效)
PageSize int //查询记录数 (仅对find有效)
NotTotal bool //是否不查询总记录数 (仅对find有效)
NotSearch bool //是否不使用配置查询项进行查询
NotFoot bool //是否查询汇总项 (仅对find有效)
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
NotConvertFromValue bool //不转换from值, 默认false(转换)
AttachFromRealValue bool //是否附加kv及enum字段原值
useModelFiledType string // 取list字段还是edit字段列表 (list|edit)
}
type RowData map[string]interface{}
// 获取模型数据库连接对象本身
// 对此修改会影响模型本身的数据库连接
func (m *Model) DB() *gorm.DB {
return m.db
}
// 获取一个新的模型数据库连接对象
// 对此修改不会影响模型本身的数据库连接
func (m *Model) NewDB() *gorm.DB {
return m.db.Session(&gorm.Session{}).Where("")
}
// 获取一个仅包含连接名及表名的连接对象
// param isAs 表是否带别名
func (m *Model) BaseDB(isAs bool) *gorm.DB {
db := GetDB(m.attr.ConnName)
if isAs {
tb := fmt.Sprintf("%s AS %s", m.attr.Table, m.attr.Alias)
if m.attr.DBName != "" {
tb = fmt.Sprintf("`%s`.%s", m.attr.DBName, tb)
}
db.Table(tb)
} else {
db.Table(m.attr.Table)
}
return db
}
// 获取Kv键值列表
func (m *Model) FindKvs(qo *KvsQueryOption) (desc Kvs, err error) {
//检查选项
if qo == nil {
qo = &KvsQueryOption{KvName: "default"}
}
if qo.KvName == "" {
qo.KvName = "default"
}
if !InArray(qo.KvName, m.attr.Kvs) {
err = fmt.Errorf("配置中不存在 [%s] kv 项配置", qo.KvName)
return
}
//分析kvs查询的字段
fields := m.ParseKvFields(qo.KvName, qo.ExtraFields)
if fields == nil || len(fields) <= 0 {
return
}
//分析kvs查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, nil, true)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Kvs[qo.KvName].Order != "" {
theDB.Order(m.attr.Kvs[qo.KvName].Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//查询
data := make([]map[string]interface{}, 0)
if err = theDB.Select(fields).Find(&data).Error; err != nil {
return
}
//处理结果
desc = make(Kvs)
for i, v := range data {
key := cast.ToString(v["__mc_key"])
//树形
if m.attr.IsTree && qo.ReturnPath {
key = cast.ToString(v[m.attr.Tree.PathField])
}
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
data[i]["__mc_value"] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i]["__mc_value"])
}
desc[key] = v
}
return
}
// 获取一条编辑数据
func (m *Model) TakeForEdit(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
indent := ""
qo.NotConvertFromValue = true
qo.NotSearch = true
qo.TreeIndent = &indent
qo.useModelFiledType = "edit"
return m.Take(qo)
}
// 获取一条list数据
func (m *Model) Take(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
qo.PageSize = 1
qo.Page = 1
qo.NotTotal = true
qo.NotFoot = true
if data, _, _, err := m.Find(qo); err != nil {
return nil, false, err
}else if len(data) < 0 {
return nil, false, nil
}else{
return data[0], true, nil
}
}
// 获取list数据列表
func (m *Model) Find(qo *QueryOption) (desc []map[string]interface{}, foot map[string]interface{}, total int64, err error) {
//检查选项
if qo == nil {
qo = &QueryOption{}
}
//分析查询的字段
fields, footFields := m.ParseFields(qo)
if fields == nil || len(fields) <= 0 {
return
}
//分析查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, qo.Values, qo.NotSearch)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//分页信息
offset, limit := getOffsetLimit(qo.Page, qo.PageSize)
//查询
desc = make([]map[string]interface{}, 0)
db := theDB.Session(&gorm.Session{})
db.Offset(offset).Limit(limit).Select(fields).Find(&desc)
if !qo.NotTotal {
db = theDB.Session(&gorm.Session{})
db.Count(&total)
}
if theDB.Error != nil {
err = theDB.Error
return
}
//汇总
if !qo.NotFoot && footFields != nil && len(footFields) > 0 {
foot = make(map[string]interface{})
if err = theDB.Select(footFields).Offset(0).Limit(1).Take(&foot).Error; err != nil {
return
}
}
err = m.ProcessData(desc, qo)
return
}
// 判断是否已有重复数据
func (m *Model) CheckUnique(data map[string]interface{}, oldPkValue interface{})(err error) {
//如果没有设置唯一字段,且主键是自增时,直接返回不重复
if (m.attr.UniqueFields == nil || len(m.attr.UniqueFields) <= 0) && m.attr.AutoInc {
return
}
db := m.BaseDB(true)
pk := m.FieldAddAlias(m.attr.Pk)
fileTitles := make([]string, 0)
if oldPkValue != nil {
db.Where(fmt.Sprintf("%s <> ?", pk), oldPkValue)
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[pk]].Title)
}
where := ""
whereValue := make([]interface{}, 0)
//检查唯一字段
for _, field := range m.attr.UniqueFields {
if where == "" {
where += fmt.Sprintf(" %s = ?", m.FieldAddAlias(field))
} else {
where += fmt.Sprintf(" AND %s = ?", m.FieldAddAlias(field))
}
whereValue = append(whereValue, data[field])
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[field]].Title)
}
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if where == "" {
where = fmt.Sprintf("%s = ?", pk)
} else {
where = fmt.Sprintf("( %s ) OR ( %s )", where, fmt.Sprintf("%s = ?", pk))
}
whereValue = append(whereValue, data[m.attr.Pk])
}
db.Where(where, whereValue...)
var total int64
if err := db.Count(&total).Error; err != nil {
return err
} else if total > 0 {
return &Result{Message:fmt.Sprintf("记录已存在:【%s】存在重复", strings.Join(fileTitles, "、"))}
}
return nil
}
// 检查必填字段
func (m *Model) CheckRequiredValues(data map[string]interface{}) (err error) {
fieldTitles := make([]string, 0)
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if cast.ToString(data[m.attr.Pk]) == "" {
fieldTitles = append(fieldTitles, m.attr.Fields[m.attr.fieldIndexMap[m.attr.Pk]].Title)
}
}
//检查配置中的必填字段
for _, field := range m.attr.Fields {
if !field.Required {
continue
}
if cast.ToString(data[field.Name]) == "" {
fieldTitles = append(fieldTitles, field.Title)
}
}
if len(fieldTitles) > 0 {
return &Result{Message:fmt.Sprintf("【%s】 字段为必填项", strings.Join(fieldTitles, "、"))}
}
return
}
// 更新记录
func (m *Model) Updates(data map[string]interface{}, oldPkValue interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, oldPkValue); err != nil {
return
}
//更新数据
db := m.BaseDB(false)
db.Where(fmt.Sprintf("`%s` = ?", m.attr.Pk), oldPkValue).Updates(data)
return db.RowsAffected, db.Error
}
// 创建记录
func (m *Model) Create(data map[string]interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, nil); err != nil {
return
}
//创建数据
db := m.BaseDB(false).Create(data)
return db.RowsAffected, db.Error
}
//保存记录(根据pk自动分析是update 或 create)
func (m *Model) Save(data map[string]interface{}, oldPkValue interface{})(rowsAffected int64, err error) {
if oldPkValue == nil { //创建
return m.Create(data)
} else { //更新
return m.Updates(data, oldPkValue)
}
}
//根据PK字段删除记录
func (m *Model) Delete(id interface{}) (rowsAffected int64, err error) {
var delIds interface{}
kind := reflect.TypeOf(id).Kind()
symbol := ""
if kind == reflect.Array || kind == reflect.Slice {
symbol = "IN"
delIds = id
} else {
symbol = "="
delIds = []interface{}{id}
}
db := m.BaseDB(false).Where(fmt.Sprintf("`%s` %s ?", m.attr.Pk, symbol), delIds).Delete(nil)
return db.RowsAffected, db.Error
}
// 分析查询条件 (此批条件只作用于返回的db对象上,不会作用于模型的db上)
// @param extraWhere 额外的查询条件
// @param searchValues 查询字段值
// @param notSearch 是否使用查询字段条件
func (m *Model) ParseWhere(db *gorm.DB, extraWhere []interface{}, searchValues map[string]interface{}, notSearch bool) *gorm.DB {
var theDB *gorm.DB
if db == nil {
theDB = m.NewDB()
} else {
theDB = db.Where("")
}
//额外的查询条件
if extraWhere != nil {
theDB.Where(extraWhere[0], extraWhere[1:]...)
}
// 模型各查询字段
if !notSearch {
searchValues = m.ParseSearchValues(searchValues)
for _, f := range m.attr.SearchFields {
// 该查询字段未带条件配置 或 未传值,跳过
if _, ok := searchValues[f.Name]; !ok {
continue
}
if f.Where == "" {
f.Where = fmt.Sprintf("%s = ?", m.FieldAddAlias(f.Name))
f.Values = []string{"?"}
}
// 查询值与查询条件匹配
values := make([]interface{}, 0)
if f.Between { //范围值
vType := reflect.TypeOf(searchValues[f.Name]).Kind()
var vs []string
if vType == reflect.Array || vType == reflect.Slice {
vs = searchValues[f.Name].([]string)
} else {
vs = strings.Split(cast.ToString(searchValues[f.Name]), f.BetweenSep)
}
for i, v := range f.Values {
if v == "?" {
values = append(values, vs[i])
} else {
values = append(values, strings.ReplaceAll(v, "?", vs[i]))
}
}
} else { //单个值
for _, v := range f.Values {
if v == "?" {
values = append(values, searchValues[f.Name])
} else {
values = append(values, strings.ReplaceAll(v, "?", cast.ToString(searchValues[f.Name])))
}
}
}
theDB.Where(f.Where, values...)
}
}
//受行权限控制的字段进行数据权限过滤
for fieldName, fromInfo := range m.attr.rowAuthFieldMap {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(fromInfo.FromName); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(fieldName)), rowAuth)
}
}
//如果自身也是行权限模型,则进行本身数据权限过滤
if m.attr.isRowAuth {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(m.attr.Name); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(m.attr.Pk)), rowAuth)
}
}
return theDB
}
//分析查询字段
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的字段名数组
// @return footFields 汇总字段
func (m *Model) ParseFields(qo *QueryOption)(fields []string,footFields []string) {
fields = make([]string, 0)
footFields = make([]string, 0)
//扩展字段
fields = append(fields, m.FieldsAddAlias(qo.ExtraFields)...)
// 树型必备字段
if m.attr.IsTree {
fields = append(fields, m.ParseTreeExtraField()...)
}
var modelFields []*ModelField
if strings.ToLower(qo.useModelFiledType) == "edit" {
modelFields = m.attr.editFields
}else{
modelFields = m.attr.listFields
}
for _, field := range modelFields {
//基础字段
fieldName := ""
if field.Alias == "" {
fieldName = m.FieldAddAlias(field.Name)
} else if field.Alias != "" {
fieldName = fmt.Sprintf("%s AS %s", field.Alias, field.Name)
}
fields = append(fields, fieldName)
//汇总字段
if field.Foot != "" {
footFields = append(footFields, fmt.Sprintf("%s AS %s", field.Foot, field.Name))
}
}
return
}
// 分析kv字段数组 (仅对通过NewConfigModel创建的模型有效)
// @param kvName kv配置项名
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的KV字段名数组
func (m *Model) ParseKvFields(kvName string, extraFields []string) (fields []string) {
fields = make([]string, 0)
// kv配置中的字段
kv, ok := ModelKv{}, false
if kv, ok = m.attr.Kvs[kvName]; !ok {
return
}
//keySep := fmt.Sprintf(",'%s',", kv.KeySep)
//valueSep := fmt.Sprintf(",'%s',", kv.ValueSep)
keyField := fmt.Sprintf("%s AS `__mc_key`", m.FieldAddAlias(kv.KeyField))
valueField := fmt.Sprintf("%s AS `__mc_value`", m.FieldAddAlias(kv.ValueField)) | treePathField := m.FieldAddAlias(m.attr.Tree.PathField)
fields = append(append(fields, treePathField), m.ParseTreeExtraField()...)
}
// 附加字段
if extraFields != nil {
fields = append(fields, m.FieldsAddAlias(extraFields)...)
}
return
}
// 给字段加表别名
func (m *Model) FieldAddAlias(field string) string {
if field == "" {
return ""
}
if strings.Contains(field, ".") || strings.Contains(field, "(") {
return field
} else {
return fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(field, " "))
}
}
// 给字段数组加表别名
func (m *Model) FieldsAddAlias(fields []string) []string {
newFields := make([]string, 0)
for _, v := range fields {
if v == "" {
continue
}
if strings.Contains(v, ".") || strings.Contains(v, "(") {
newFields = append(newFields, v)
} else {
newFields = append(newFields, fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(v, " ")))
}
}
return newFields
}
// 对查询的数据进行处理
func (m *Model) ProcessData(data []map[string]interface{}, qo *QueryOption)(err error) {
if data == nil || len(data) <= 0 {
return
}
//序号
if m.attr.Number {
for i, _ := range data {
data[i]["__mc_index"] = (qo.Page -1) * qo.PageSize + i + 1
}
}
//转换成from值
if !qo.NotConvertFromValue {
for _, f := range m.attr.Fields {
if _, ok := data[0][f.Name]; !ok {
continue
}
if f.From != "" {
enum := m.GetFromKvs(f.From)
for i, _ := range data {
if qo.AttachFromRealValue { //附加字段原值真实值
data[i]["__mc_"+f.Name] = data[i][f.Name]
}
vString := cast.ToString(data[i][f.Name]) //字段值
if f.Multiple { //多选
vs := strings.Split(vString, f.Separator)
newVs := make([]string, 0)
for _, v := range vs {
newVs = append(newVs, cast.ToString(enum[v]["__mc_value"]))
}
data[i][f.Name] = strings.Join(newVs, f.Separator)
} else { //单选
data[i][f.Name] = cast.ToString(enum[vString]["__mc_value"])
}
}
}
}
}
//树形
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
for i, _ := range data {
data[i][m.attr.Tree.NameField] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i][m.attr.Tree.NameField])
}
}
return
}
// 分析树形结构查询必须的扩展字段
func (m *Model) ParseTreeExtraField() (field []string) {
pathField := m.FieldAddAlias(m.attr.Tree.PathField)
__mc_pathField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Tree.PathField)
__mc_pkField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Pk)
field = make([]string, 3)
//层级字段
field[0] = fmt.Sprintf("CEILING(LENGTH(%s)/%d) AS `__mc_level`", pathField, m.attr.Tree.PathBit)
//父节点字段
field[1] = fmt.Sprintf("(SELECT %s FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_parent`",
__mc_pkField, m.attr.Table, m.attr.Table, __mc_pathField, pathField, pathField, m.attr.Tree.PathBit)
//字节点数字段
field[2] = fmt.Sprintf("(SELECT count(%s) FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_child_count`",
__mc_pkField, m.attr.Table, m.attr.Table, pathField, __mc_pathField, __mc_pathField, m.attr.Tree.PathBit)
return
} | fields = append(fields, keyField, valueField)
// 树型必备字段
if m.attr.IsTree { | random_line_split |
model_sql.go | package mc
import (
"fmt"
"github.com/spf13/cast"
"gorm.io/gorm"
"reflect"
"strings"
)
//kvs查询选项
type KvsQueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
KvName string //kv配置项名
ExtraWhere []interface{} //额外附加的查询条件
ReturnPath bool //当模型为树型结构时,返回的key是否使用path代替
ExtraFields []string //额外附加的查询字段
Order string //排序
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
}
//数据查询选项
type QueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
ExtraWhere []interface{} //附加的查询条件
Values map[string]interface{} //查询项的值
ExtraFields []string //额外附加的查询字段
Order string //排序
Page int //查询页码(仅对find有效)
PageSize int //查询记录数 (仅对find有效)
NotTotal bool //是否不查询总记录数 (仅对find有效)
NotSearch bool //是否不使用配置查询项进行查询
NotFoot bool //是否查询汇总项 (仅对find有效)
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
NotConvertFromValue bool //不转换from值, 默认false(转换)
AttachFromRealValue bool //是否附加kv及enum字段原值
useModelFiledType string // 取list字段还是edit字段列表 (list|edit)
}
type RowData map[string]interface{}
// 获取模型数据库连接对象本身
// 对此修改会影响模型本身的数据库连接
func (m *Model) DB() *gorm.DB {
return m.db
}
// 获取一个新的模型数据库连接对象
// 对此修改不会影响模型本身的数据库连接
func (m *Model) NewDB() *gorm.DB {
return m.db.Session(&gorm.Session{}).Where("")
}
// 获取一个仅包含连接名及表名的连接对象
// param isAs 表是否带别名
func (m *Model) BaseDB(isAs bool) *gorm.DB {
db := GetDB(m.attr.ConnName)
if isAs {
tb := fmt.Sprintf("%s AS %s", m.attr.Table, m.attr.Alias)
if m.attr.DBName != "" {
tb = fmt.Sprintf("`%s`.%s", m.attr.DBName, tb)
}
db.Table(tb)
} else {
db.Table(m.attr.Table)
}
return db
}
// 获取Kv键值列表
func (m *Model) FindKvs(qo *KvsQueryOption) (desc Kvs, err error) {
//检查选项
if qo == nil {
qo = &KvsQueryOption{KvName: "default"}
}
if qo.KvName == "" {
qo.KvName = "default"
}
if !InArray(qo.KvName, m.attr.Kvs) {
err = fmt.Errorf("配置中不存在 [%s] kv 项配置", qo.KvName)
return
}
//分析kvs查询的字段
fields := m.ParseKvFields(qo.KvName, qo.ExtraFields)
if fields == nil || len(fields) <= 0 {
return
}
//分析kvs查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, nil, true)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Kvs[qo.KvName].Order != "" {
theDB.Order(m.attr.Kvs[qo.KvName].Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//查询
data := make([]map[string]interface{}, 0)
if err = theDB.Select(fields).Find(&data).Error; err != nil {
return
}
//处理结果
desc = make(Kvs)
for i, v := range data {
key := cast.ToString(v["__mc_key"])
//树形
if m.attr.IsTree && qo.ReturnPath {
key = cast.ToString(v[m.attr.Tree.PathField])
}
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
data[i]["__mc_value"] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i]["__mc_value"])
}
desc[key] = v
}
return
}
// 获取一条编辑数据
func (m *Model) TakeForEdit(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
indent := ""
qo.NotConvertFromValue = true
qo.NotSearch = true
qo.TreeIndent = &indent
qo.useModelFiledType = "edit"
return m.Take(qo)
}
// 获取一条list数据
func (m *Model) Take(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
qo.PageSize = 1
qo.Page = 1
qo.NotTotal = true
qo.NotFoot = true
if data, _, _, err := m.Find(qo); err != nil {
return nil, false, err
}else if len(data) < 0 {
return nil, false, nil
}else{
return data[0], true, nil
}
}
// 获取list数据列表
func (m *Model) Find(qo *QueryOption) (desc []map[string]interface{}, foot map[string]interface{}, total int64, err error) {
//检查选项
if qo == nil {
qo = &QueryOption{}
}
//分析查询的字段
fields, footFields := m.ParseFields(qo)
if fields == nil || len(fields) <= 0 {
return
}
//分析查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, qo.Values, qo.NotSearch)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//分页信息
offset, limit := getOffsetLimit(qo.Page, qo.PageSize)
//查询
desc = make([]map[string]interface{}, 0)
db := theDB.Session(&gorm.Session{})
db.Offset(offset).Limit(limit).Select(fields).Find(&desc)
if !qo.NotTotal {
db = theDB.Session(&gorm.Session{})
db.Count(&total)
}
if theDB.Error != nil {
err = theDB.Error
return
}
//汇总
if !qo.NotFoot && footFields != nil && len(footFields) > 0 {
foot = make(map[string]interface{})
if err = theDB.Select(footFields).Offset(0).Limit(1).Take(&foot).Error; err != nil {
return
}
}
err = m.ProcessData(desc, qo)
return
}
// 判断是否已有重复数据
func (m *Model) CheckUnique(data map[string]interface{}, oldPkValue interface{})(err error) {
//如果没有设置唯一字段,且主键是自增时,直接返回不重复
if (m.attr.UniqueFields == nil || len(m.attr.UniqueFields) <= 0) && m.attr.AutoInc {
return
}
db := m.BaseDB(true)
pk := m.FieldAddAlias(m.attr.Pk)
fileTitles := make([]string, 0)
if oldPkValue != nil {
db.Where(fmt.Sprintf("%s <> ?", pk), oldPkValue)
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[pk]].Title)
}
where := ""
whereValue := make([]interface{}, 0)
//检查唯一字段
for _, field := range m.attr.UniqueFields {
if where == "" {
where += fmt.Sprintf(" %s = ?", m.FieldAddAlias(field))
} else {
where += fmt.Sprintf(" AND %s = ?", m.FieldAddAlias(field))
}
whereValue = append(whereValue, data[field])
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[field]].Title)
}
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if where == "" {
where = fmt.Sprintf("%s = ?", pk)
} else {
where = fmt.Sprintf("( %s ) OR ( %s )", where, fmt.Sprintf("%s = ?", pk))
}
whereValue = append(whereValue, data[m.attr.Pk])
}
db.Where(where, whereValue...)
var total int64
if err := db.Count(&total).Error; err != nil {
return er | 填字段
func (m *Model) CheckRequiredValues(data map[string]interface{}) (err error) {
fieldTitles := make([]string, 0)
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if cast.ToString(data[m.attr.Pk]) == "" {
fieldTitles = append(fieldTitles, m.attr.Fields[m.attr.fieldIndexMap[m.attr.Pk]].Title)
}
}
//检查配置中的必填字段
for _, field := range m.attr.Fields {
if !field.Required {
continue
}
if cast.ToString(data[field.Name]) == "" {
fieldTitles = append(fieldTitles, field.Title)
}
}
if len(fieldTitles) > 0 {
return &Result{Message:fmt.Sprintf("【%s】 字段为必填项", strings.Join(fieldTitles, "、"))}
}
return
}
// 更新记录
func (m *Model) Updates(data map[string]interface{}, oldPkValue interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, oldPkValue); err != nil {
return
}
//更新数据
db := m.BaseDB(false)
db.Where(fmt.Sprintf("`%s` = ?", m.attr.Pk), oldPkValue).Updates(data)
return db.RowsAffected, db.Error
}
// 创建记录
func (m *Model) Create(data map[string]interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, nil); err != nil {
return
}
//创建数据
db := m.BaseDB(false).Create(data)
return db.RowsAffected, db.Error
}
//保存记录(根据pk自动分析是update 或 create)
func (m *Model) Save(data map[string]interface{}, oldPkValue interface{})(rowsAffected int64, err error) {
if oldPkValue == nil { //创建
return m.Create(data)
} else { //更新
return m.Updates(data, oldPkValue)
}
}
//根据PK字段删除记录
func (m *Model) Delete(id interface{}) (rowsAffected int64, err error) {
var delIds interface{}
kind := reflect.TypeOf(id).Kind()
symbol := ""
if kind == reflect.Array || kind == reflect.Slice {
symbol = "IN"
delIds = id
} else {
symbol = "="
delIds = []interface{}{id}
}
db := m.BaseDB(false).Where(fmt.Sprintf("`%s` %s ?", m.attr.Pk, symbol), delIds).Delete(nil)
return db.RowsAffected, db.Error
}
// 分析查询条件 (此批条件只作用于返回的db对象上,不会作用于模型的db上)
// @param extraWhere 额外的查询条件
// @param searchValues 查询字段值
// @param notSearch 是否使用查询字段条件
func (m *Model) ParseWhere(db *gorm.DB, extraWhere []interface{}, searchValues map[string]interface{}, notSearch bool) *gorm.DB {
var theDB *gorm.DB
if db == nil {
theDB = m.NewDB()
} else {
theDB = db.Where("")
}
//额外的查询条件
if extraWhere != nil {
theDB.Where(extraWhere[0], extraWhere[1:]...)
}
// 模型各查询字段
if !notSearch {
searchValues = m.ParseSearchValues(searchValues)
for _, f := range m.attr.SearchFields {
// 该查询字段未带条件配置 或 未传值,跳过
if _, ok := searchValues[f.Name]; !ok {
continue
}
if f.Where == "" {
f.Where = fmt.Sprintf("%s = ?", m.FieldAddAlias(f.Name))
f.Values = []string{"?"}
}
// 查询值与查询条件匹配
values := make([]interface{}, 0)
if f.Between { //范围值
vType := reflect.TypeOf(searchValues[f.Name]).Kind()
var vs []string
if vType == reflect.Array || vType == reflect.Slice {
vs = searchValues[f.Name].([]string)
} else {
vs = strings.Split(cast.ToString(searchValues[f.Name]), f.BetweenSep)
}
for i, v := range f.Values {
if v == "?" {
values = append(values, vs[i])
} else {
values = append(values, strings.ReplaceAll(v, "?", vs[i]))
}
}
} else { //单个值
for _, v := range f.Values {
if v == "?" {
values = append(values, searchValues[f.Name])
} else {
values = append(values, strings.ReplaceAll(v, "?", cast.ToString(searchValues[f.Name])))
}
}
}
theDB.Where(f.Where, values...)
}
}
//受行权限控制的字段进行数据权限过滤
for fieldName, fromInfo := range m.attr.rowAuthFieldMap {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(fromInfo.FromName); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(fieldName)), rowAuth)
}
}
//如果自身也是行权限模型,则进行本身数据权限过滤
if m.attr.isRowAuth {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(m.attr.Name); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(m.attr.Pk)), rowAuth)
}
}
return theDB
}
//分析查询字段
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的字段名数组
// @return footFields 汇总字段
func (m *Model) ParseFields(qo *QueryOption)(fields []string,footFields []string) {
fields = make([]string, 0)
footFields = make([]string, 0)
//扩展字段
fields = append(fields, m.FieldsAddAlias(qo.ExtraFields)...)
// 树型必备字段
if m.attr.IsTree {
fields = append(fields, m.ParseTreeExtraField()...)
}
var modelFields []*ModelField
if strings.ToLower(qo.useModelFiledType) == "edit" {
modelFields = m.attr.editFields
}else{
modelFields = m.attr.listFields
}
for _, field := range modelFields {
//基础字段
fieldName := ""
if field.Alias == "" {
fieldName = m.FieldAddAlias(field.Name)
} else if field.Alias != "" {
fieldName = fmt.Sprintf("%s AS %s", field.Alias, field.Name)
}
fields = append(fields, fieldName)
//汇总字段
if field.Foot != "" {
footFields = append(footFields, fmt.Sprintf("%s AS %s", field.Foot, field.Name))
}
}
return
}
// 分析kv字段数组 (仅对通过NewConfigModel创建的模型有效)
// @param kvName kv配置项名
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的KV字段名数组
func (m *Model) ParseKvFields(kvName string, extraFields []string) (fields []string) {
fields = make([]string, 0)
// kv配置中的字段
kv, ok := ModelKv{}, false
if kv, ok = m.attr.Kvs[kvName]; !ok {
return
}
//keySep := fmt.Sprintf(",'%s',", kv.KeySep)
//valueSep := fmt.Sprintf(",'%s',", kv.ValueSep)
keyField := fmt.Sprintf("%s AS `__mc_key`", m.FieldAddAlias(kv.KeyField))
valueField := fmt.Sprintf("%s AS `__mc_value`", m.FieldAddAlias(kv.ValueField))
fields = append(fields, keyField, valueField)
// 树型必备字段
if m.attr.IsTree {
treePathField := m.FieldAddAlias(m.attr.Tree.PathField)
fields = append(append(fields, treePathField), m.ParseTreeExtraField()...)
}
// 附加字段
if extraFields != nil {
fields = append(fields, m.FieldsAddAlias(extraFields)...)
}
return
}
// 给字段加表别名
func (m *Model) FieldAddAlias(field string) string {
if field == "" {
return ""
}
if strings.Contains(field, ".") || strings.Contains(field, "(") {
return field
} else {
return fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(field, " "))
}
}
// 给字段数组加表别名
func (m *Model) FieldsAddAlias(fields []string) []string {
newFields := make([]string, 0)
for _, v := range fields {
if v == "" {
continue
}
if strings.Contains(v, ".") || strings.Contains(v, "(") {
newFields = append(newFields, v)
} else {
newFields = append(newFields, fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(v, " ")))
}
}
return newFields
}
// 对查询的数据进行处理
func (m *Model) ProcessData(data []map[string]interface{}, qo *QueryOption)(err error) {
if data == nil || len(data) <= 0 {
return
}
//序号
if m.attr.Number {
for i, _ := range data {
data[i]["__mc_index"] = (qo.Page -1) * qo.PageSize + i + 1
}
}
//转换成from值
if !qo.NotConvertFromValue {
for _, f := range m.attr.Fields {
if _, ok := data[0][f.Name]; !ok {
continue
}
if f.From != "" {
enum := m.GetFromKvs(f.From)
for i, _ := range data {
if qo.AttachFromRealValue { //附加字段原值真实值
data[i]["__mc_"+f.Name] = data[i][f.Name]
}
vString := cast.ToString(data[i][f.Name]) //字段值
if f.Multiple { //多选
vs := strings.Split(vString, f.Separator)
newVs := make([]string, 0)
for _, v := range vs {
newVs = append(newVs, cast.ToString(enum[v]["__mc_value"]))
}
data[i][f.Name] = strings.Join(newVs, f.Separator)
} else { //单选
data[i][f.Name] = cast.ToString(enum[vString]["__mc_value"])
}
}
}
}
}
//树形
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
for i, _ := range data {
data[i][m.attr.Tree.NameField] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i][m.attr.Tree.NameField])
}
}
return
}
// 分析树形结构查询必须的扩展字段
func (m *Model) ParseTreeExtraField() (field []string) {
pathField := m.FieldAddAlias(m.attr.Tree.PathField)
__mc_pathField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Tree.PathField)
__mc_pkField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Pk)
field = make([]string, 3)
//层级字段
field[0] = fmt.Sprintf("CEILING(LENGTH(%s)/%d) AS `__mc_level`", pathField, m.attr.Tree.PathBit)
//父节点字段
field[1] = fmt.Sprintf("(SELECT %s FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_parent`",
__mc_pkField, m.attr.Table, m.attr.Table, __mc_pathField, pathField, pathField, m.attr.Tree.PathBit)
//字节点数字段
field[2] = fmt.Sprintf("(SELECT count(%s) FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_child_count`",
__mc_pkField, m.attr.Table, m.attr.Table, pathField, __mc_pathField, __mc_pathField, m.attr.Tree.PathBit)
return
} | r
} else if total > 0 {
return &Result{Message:fmt.Sprintf("记录已存在:【%s】存在重复", strings.Join(fileTitles, "、"))}
}
return nil
}
// 检查必 | conditional_block |
model_sql.go | package mc
import (
"fmt"
"github.com/spf13/cast"
"gorm.io/gorm"
"reflect"
"strings"
)
//kvs查询选项
type KvsQueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
KvName string //kv配置项名
ExtraWhere []interface{} //额外附加的查询条件
ReturnPath bool //当模型为树型结构时,返回的key是否使用path代替
ExtraFields []string //额外附加的查询字段
Order string //排序
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
}
//数据查询选项
type QueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
ExtraWhere []interface{} //附加的查询条件
Values map[string]interface{} //查询项的值
ExtraFields []string //额外附加的查询字段
Order string //排序
Page int //查询页码(仅对find有效)
PageSize int //查询记录数 (仅对find有效)
NotTotal bool //是否不查询总记录数 (仅对find有效)
NotSearch bool //是否不使用配置查询项进行查询
NotFoot bool //是否查询汇总项 (仅对find有效)
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
NotConvertFromValue bool //不转换from值, 默认false(转换)
AttachFromRealValue bool //是否附加kv及enum字段原值
useModelFiledType string // 取list字段还是edit字段列表 (list|edit)
}
type RowData map[string]interface{}
// 获取模型数据库连接对象本身
// 对此修改会影响模型本身的数据库连接
func (m *Model) DB() *gorm.DB {
return m.db
}
// 获取一个新的模型数据库连接对象
// 对此修改不会影响模型本身的数据库连接
func (m *Model) NewDB() *gorm.DB {
return m.db.Session(&gorm.Session{}).Where("")
}
// 获取一个仅包含连接名及表名的连接对象
// param isAs 表是否带别名
func (m *Model) BaseDB(isAs bool) *gorm.DB {
db := GetDB(m.attr.ConnName)
if isAs {
tb := fmt.Sprintf("%s AS %s", m.attr.Table, m.attr.Alias)
if m.attr.DBName != "" {
tb = fmt.Sprintf("`%s`.%s", m.attr.DBName, tb)
}
db.Table(tb)
} else {
db.Table(m.attr.Table)
}
return db
}
// 获取Kv键值列表
func (m *Model) FindKvs(qo *KvsQueryOption) (desc Kvs, err error) {
//检查选项
if qo == nil {
qo = &KvsQueryOption{KvName: "default"}
}
if qo.KvName == "" {
qo.KvName = "default"
}
if !InArray(qo.KvName, m.attr.Kvs) {
err = fmt.Errorf("配置中不存在 [%s] kv 项配置", qo.KvName)
return
}
//分析kvs查询的字段
fields := m.ParseKvFields(qo.KvName, qo.ExtraFields)
if fields == nil || len(fields) <= 0 {
return
}
//分析kvs查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, nil, true)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Kvs[qo.KvName].Order != "" {
theDB.Order(m.attr.Kvs[qo.KvName].Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//查询
data := make([]map[string]interface{}, 0)
if err = theDB.Select(fields).Find(&data).Error; err != nil {
return
}
//处理结果
desc = make(Kvs)
for i, v := range data {
key := cast.ToString(v["__mc_key"])
//树形
if m.attr.IsTree && qo.ReturnPath {
key = cast.ToString(v[m.attr.Tree.PathField])
}
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
data[i]["__mc_value"] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i]["__mc_value"])
}
desc[key] = v
}
return
}
// 获取一条编辑数据
func (m *Model) TakeForEdit(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
indent := ""
qo.NotConvertFromValue = true
qo.NotSearch = true
qo.TreeIndent = &indent
qo.useModelFiledType = "edit"
return m.Take(qo)
}
// 获取一条list数据
func (m *Model) Take(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
qo.PageSize = 1
qo.Page = 1
qo.NotTotal = true
qo.NotFoot = true
if data, _, _, err := m.Find(qo); err != nil {
return nil, false, err
}else if len(data) < 0 {
return nil, false, nil
}else{
return data[0], true, nil
}
}
// 获取list数据列表
func (m *Model) Find(qo *QueryOption) (desc []map[string]interface{}, foot map[string]interface{}, total int64, err error) {
//检查选项
if qo == nil {
qo = &QueryOption{}
}
//分析查询的字段
fields, footFields := m.ParseFields(qo)
if fields == nil || len(fields) <= 0 {
return
}
//分析查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, qo.Values, qo.NotSearch)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//分页信息
offset, limit := getOffsetLimit(qo.Page, qo.PageSize)
//查询
desc = make([]map[string]interface{}, 0)
db := theDB.Session(&gorm.Session{})
db.Offset(offset).Limit(limit).Select(fields).Find(&desc)
if !qo.NotTotal {
db = theDB.Session(&gorm.Session{})
db.Count(&total)
}
if theDB.Error != nil {
err = theDB.Error
return
}
//汇总
if !qo.NotFoot && footFields != nil && len(footFields) > 0 {
foot = make(map[string]interface{})
if err = theDB.Select(footFields).Offset(0).Limit(1).Take(&foot).Error; err != nil {
return
}
}
err = m.ProcessData(desc, qo)
return
}
// 判断是否已有重复数据
func (m *Model) CheckUnique(data map[string]interface{}, oldPkValue interface{})(err error) {
//如果没有设置唯一字段,且主键是自增时,直接返回不重复
if (m.attr.UniqueFields == nil || len(m.attr.UniqueFields) <= 0) && m.attr.AutoInc {
return
}
db := m.BaseDB(true)
pk := m.FieldAddAlias(m.attr.Pk)
fileTitles := make([]string, 0)
if oldPkValue != nil {
db.Where(fmt.Sprintf("%s <> ?", pk), oldPkValue)
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[pk]].Title)
}
where := ""
whereValue := make([]interface{}, 0)
//检查唯一字段
for _, field := range m.attr.UniqueFields {
if where == "" {
where += fmt.Sprintf(" %s = ?", m.FieldAddAlias(field))
} else {
where += fmt.Sprintf(" AND %s = ?", m.FieldAddAlias(field))
}
whereValue = append(whereValue, data[field])
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[field]].Title)
}
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if where == "" {
where = fmt.Sprintf("%s = ?", pk)
} else {
where = fmt.Sprintf("( %s ) OR ( %s )", where, fmt.Sprintf("%s = ?", pk))
}
whereValue = append(whereValue, data[m.attr.Pk])
}
db.Where(where, whereValue...)
var total int64
if err := db.Count(&total).Error; err != nil {
return err
} else if total > 0 {
return &Result{Message:fmt.Sprintf("记录已存在:【%s】存在重复", strings.Join(fileTitles, "、"))}
}
return nil
}
// 检查必填字段
func (m *Model) CheckRequiredValues(data map[string]interface{}) (err error) {
fieldTitles := make([]string, 0)
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if cast.ToString(data[m.attr.Pk]) == "" {
fieldTitles = append(fieldTitles, m.attr.Fields[m.attr.fieldIndexMap[m.attr.Pk]].Title)
}
}
//检查配置中的必填字段
for _, field := range m.attr.Fields {
if !field.Required {
continue
}
if cast.ToString(data[field.Name]) == "" {
fieldTitles = append(fieldTitles, field.Title)
}
}
if len(fieldTitles) > 0 {
return &Result{Message:fmt.Sprintf("【%s】 字段为必填项", strings.Join(fieldTitles, "、"))}
}
return
}
// 更新记录
func (m *Model) Updates(data map[string]interface{}, oldPkValue interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, oldPkValue); err != nil {
return
}
//更新数据
db := m.BaseDB(false)
db.Where(fmt.Sprintf("`%s` = ?", m.attr.Pk), oldPkValue).Updates(data)
return db.RowsAffected, db.Error
}
// 创建记录
func (m *Model) Create(data map[string]interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, nil); err != nil {
return
}
//创建数据
db := m.BaseDB(false).Create(data)
return db.RowsAffected, db.Error
}
//保存记录(根据pk自动分析是update 或 create)
func (m *Model) Save(data map[string]interface{}, oldPkValue interface{})(rowsAffected int64, err error) {
if oldPkValue == nil { //创建
return m.Create(data)
} else { //更新
return m.Updates(data, oldPkValue)
}
}
//根据PK字段删除记录
func (m *Model) Delete(id interface{}) (rowsAffected int64, err error) {
var delIds interface{}
kind := reflect.TypeOf(id).Kind()
symbol := ""
if kind == reflect.Array || kind == reflect.Slice {
symbol = "IN"
delIds = id
} else {
symbol = "="
delIds = []interface{}{id}
}
db := m.BaseDB(false).Where(fmt.Sprintf("`%s` %s ?", m.attr.Pk, symbol), delIds).Delete(nil)
return db.RowsAffected, db.Error
}
// 分析查询条件 (此批条件只作用于返回的db对象上,不会作用于模型的db上)
// @param extraWhere 额外的查询条件
// @param searchValues 查询字段值
// @param notSearch 是否使用查询字段条件
func (m *Model) ParseWhere(db *gorm.DB, extraWhere []interface{}, searchValues map[string]interface{}, notSearch bool) *gorm.DB {
var theDB *gorm.DB
if db == nil {
theDB = m.NewDB()
} else {
theDB = db.Where("")
}
//额外的查询条件
if extraWhere != nil {
theDB.Where(extraWhere[0], extraWhere[1:]...)
}
// 模型各查询字段
if !notSearch {
searchValues = m.ParseSearchValues(searchValues)
for _, f := range m.attr.SearchFields {
// 该查询字段未带条件配置 或 未传值,跳过
if _, ok := searchValues[f.Name]; !ok {
continue
}
if f.Where == "" {
f.Where = fmt.Sprintf("%s = ?", m.FieldAddAlias(f.Name))
f.Values = []string{"?"}
}
// 查询值与查询条件匹配
values := make([]interface{}, 0)
if f.Between { //范围值
vType := reflect.TypeOf(searchValues[f.Name]).Kind()
var vs []string
if vType == reflect.Array || vType == reflect.Slice {
vs = searchValues[f.Name].([]string)
} else {
vs = strings.Split(cast.ToString(searchValues[f.Name]), f.BetweenSep)
}
for i, v := range f.Values {
if v == "?" {
values = append(values, vs[i])
} else {
values = append(values, strings.ReplaceAll(v, "?", vs[i]))
}
}
} else { //单个值
for _, v := range f.Values {
if v == "?" {
values = append(values, searchValues[f.Name])
} else {
values = append(values, strings.ReplaceAll(v, "?", cast.ToString(searchValues[f.Name])))
}
}
}
theDB.Where(f.Where, values...)
}
}
//受行权限控制的字段进行数据权限过滤
for fieldName, fromInfo := range m.attr.rowAuthFieldMap {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(fromInfo.FromName); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(fieldName)), rowAuth)
}
}
//如果自身也是行权限模型,则进行本身数据权限过滤
if m.attr.isRowAuth {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(m.attr.Name); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(m.attr.Pk)), rowAuth)
}
}
return theDB
}
//分析查询字段
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的字段名数组
// @return footFields 汇总字段
func (m *Model) ParseFields(qo *QueryOption)(fields []string,footFields []string) {
fields = make([]string, 0)
footFields = make([]string, 0)
//扩展字段
fields = append(fields, m.FieldsAddAlias(qo.ExtraFields)...)
// 树型必备字段
if m.attr.IsTree {
fields = append(fields, m.ParseTreeExtraField()...)
}
var modelFields []*ModelField
if strings.ToLower(qo.useModelFiledType) == "edit" {
modelFields = m.attr.editFields
}else{
modelFields = m.attr.listFields
}
for _, field := range modelFields {
//基础字段
fieldName := ""
if field.Alias == "" {
fieldName = m.FieldAddAlias(field.Name)
} else if field.Alias != "" {
fieldName = fmt.Sprintf("%s AS %s", field.Alias, field.Name)
}
fields = append(fields, fieldName)
//汇总字段
if field.Foot != "" {
footFields = append(footFields, fmt.Sprintf("%s AS %s", field.Foot, field.Name))
}
}
return
}
// 分析kv字段数组 (仅对通过NewConfigModel创建的模型有效)
// @param kvName kv配置项名
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的KV字段名数组
func (m *Model) ParseKvFields(kvName string, extraFields []string) (fields []string) {
fields = make([]string, 0)
// kv配置中的字段
kv, ok := ModelKv{}, false
if kv, ok = m.attr.Kvs[kvName]; !ok {
return
}
//keySep := fmt.Sprintf(",'%s',", kv.KeySep)
//valueSep := fmt.Sprintf(",'%s',", kv.ValueSep)
keyField := fmt.Sprintf("%s AS `__mc_key`", m.FieldAddAlias(kv.KeyField))
valueField := fmt.Sprintf("%s AS `__mc_value`", m.FieldAddAlias(kv.ValueField))
fields = append(fields, keyField, valueField)
// 树型必备字段
if m.attr.IsTree {
treePathField := m.FieldAddAlias(m.attr.Tree.PathField)
fields = append(append(fields, treePathField), m.ParseTreeExtraField()...)
}
// 附加字段
if extraFields != nil {
fields = append(fields, m.FieldsAddAlias(extraFields)...)
}
return
}
// 给字段加表别名
func (m *Model) FieldAddAlias(field string) string {
if field == "" {
return ""
}
if strings.Contains(field, ".") || strings.Contains(field, "(") {
return field
} else {
return fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(field, " "))
}
}
// 给字段数组加表别名
func (m *Model) FieldsAddAlias(fields []string) []string {
newFields := make([]string, 0)
for _, v := range fields {
if v == "" {
continue
}
if strings.Contains(v, ".") || strings.Contains(v, "(") {
newFields = append(newFields, v)
} else {
newFields = append(newFields, fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(v, " ")))
}
}
return newFields
}
// 对查询的数据进行处理
func (m *Model) ProcessData(data []map[string]interface{}, qo *QueryOption)(err error) {
if data == nil || len(data) <= 0 {
return
}
//序号
if m.attr.Number {
for i, _ := range data {
data[i]["__mc_index"] = (qo.Page -1) * qo.PageSize + i + 1
}
}
//转换成from值
if !qo.NotConvertFromValue {
for _, f := range m.attr.Fields {
if _, ok := data[0][f.Name]; !ok {
continue
}
if f.From != "" {
enum := m.GetFromKvs(f.From)
for i, _ := range data {
if qo.AttachFromRealValue { //附加字段原值真实值
data[i]["__mc_"+f.Name] = data[i][f.Name]
}
vString := cast.ToString(data[i][f.Name]) //字段值
if f.Multiple { //多选
vs := strings.Split(vString, f.Separator)
newVs := make([]string, 0)
for _, v := range vs {
newVs = append(newVs, cast.ToString(enum[v]["__mc_value"]))
}
data[i][f.Name] = strings.Join(newVs, f.Separator)
} else { //单选
data[i][f.Name] = cast.ToString(enum[vString]["__mc_value"])
}
}
}
}
}
//树形
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
for i, _ := range data {
data[i][m.attr.Tree.NameField] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cas | ", m.attr.Table, m.attr.Pk)
field = make([]string, 3)
//层级字段
field[0] = fmt.Sprintf("CEILING(LENGTH(%s)/%d) AS `__mc_level`", pathField, m.attr.Tree.PathBit)
//父节点字段
field[1] = fmt.Sprintf("(SELECT %s FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_parent`",
__mc_pkField, m.attr.Table, m.attr.Table, __mc_pathField, pathField, pathField, m.attr.Tree.PathBit)
//字节点数字段
field[2] = fmt.Sprintf("(SELECT count(%s) FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_child_count`",
__mc_pkField, m.attr.Table, m.attr.Table, pathField, __mc_pathField, __mc_pathField, m.attr.Tree.PathBit)
return
} | t.ToString(data[i][m.attr.Tree.NameField])
}
}
return
}
// 分析树形结构查询必须的扩展字段
func (m *Model) ParseTreeExtraField() (field []string) {
pathField := m.FieldAddAlias(m.attr.Tree.PathField)
__mc_pathField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Tree.PathField)
__mc_pkField := fmt.Sprintf("`__mc_%s`.`%s` | identifier_body |
model_sql.go | package mc
import (
"fmt"
"github.com/spf13/cast"
"gorm.io/gorm"
"reflect"
"strings"
)
//kvs查询选项
type KvsQueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
KvName string //kv配置项名
ExtraWhere []interface{} //额外附加的查询条件
ReturnPath bool //当模型为树型结构时,返回的key是否使用path代替
ExtraFields []string //额外附加的查询字段
Order string //排序
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
}
//数据查询选项
type QueryOption struct {
DB *gorm.DB //当此项为空的,使用model.db
ExtraWhere []interface{} //附加的查询条件
Values map[string]interface{} //查询项的值
ExtraFields []string //额外附加的查询字段
Order string //排序
Page int //查询页码(仅对find有效)
PageSize int //查询记录数 (仅对find有效)
NotTotal bool //是否不查询总记录数 (仅对find有效)
NotSearch bool //是否不使用配置查询项进行查询
NotFoot bool //是否查询汇总项 (仅对find有效)
TreeIndent *string //树型模型节点名称前根据层级加前缀字符
NotConvertFromValue bool //不转换from值, 默认false(转换)
AttachFromRealValue bool //是否附加kv及enum字段原值
useModelFiledType string // 取list字段还是edit字段列表 (list|edit)
}
type RowData map[string]interface{}
// 获取模型数据库连接对象本身
// 对此修改会影响模型本身的数据库连接
func (m *Model) DB() *gorm.DB {
return m.db
}
// 获取一个新的模型数据库连接对象
// 对此修改不会影响模型本身的数据库连接
func (m *Model) NewDB() *gorm.DB {
return m.db.Session(&gorm.Session{}).Where("")
}
// 获取一个仅包含连接名及表名的连接对象
// param isAs 表是否带别名
func (m *Model) BaseDB(isAs bool) *gorm.DB {
db := GetDB(m.attr.ConnName)
if isAs {
tb := fmt.Sprintf("%s AS %s", m.attr.Table, m.attr.Alias)
if m.attr.DBName != "" {
tb = fmt.Sprintf("`%s`.%s", m.attr.DBName, tb)
}
db.Table(tb)
} else {
db.Table(m.attr.Table)
}
return db
}
// 获取Kv键值列表
func (m *Model) FindKvs(qo *KvsQueryOption) (desc Kvs, err error) {
//检查选项
if qo == nil {
qo = &KvsQueryOption{KvName: "default"}
}
if qo.KvName == "" {
qo.KvName = "default"
}
if !InArray(qo.KvName, m.attr.Kvs) {
err = fmt.Errorf("配置中不存在 [%s] kv 项配置", qo.KvName)
return
}
//分析kvs查询的字段
fields := m.ParseKvFields(qo.KvName, qo.ExtraFields)
if fields == nil || len(fields) <= 0 {
return
}
//分析kvs查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, nil, true)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Kvs[qo.KvName].Order != "" {
theDB.Order(m.attr.Kvs[qo.KvName].Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//查询
data := make([]map[string]interface{}, 0)
if err = theDB.Select(fields).Find(&data).Error; err != nil {
return
}
//处理结果
desc = make(Kvs)
for i, v := range data {
key := cast.ToString(v["__mc_key"])
//树形
if m.attr.IsTree && qo.ReturnPath {
key = cast.ToString(v[m.attr.Tree.PathField])
}
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
data[i]["__mc_value"] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i]["__mc_value"])
}
desc[key] = v
}
return
}
// 获取一条编辑数据
func (m *Model) TakeForEdit(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
indent := ""
qo.NotConvertFromValue = true
qo.NotSearch = true
qo.TreeIndent = &indent
qo.useModelFiledType = "edit"
return m.Take(qo)
}
// 获取一条list数据
func (m *Model) Take(qo *QueryOption) (desc map[string]interface{}, exist bool, err error) {
qo.PageSize = 1
qo.Page = 1
qo.NotTotal = true
qo.NotFoot = true
if data, _, _, err := m.Find(qo); err != nil {
return nil, false, err
}else if len(data) < 0 {
return nil, false, nil
}else{
return data[0], true, nil
}
}
// 获取list数据列表
func (m *Model) Find(qo *QueryOption) (desc []map[string]interface{}, foot map[string]interface{}, total int64, err error) {
//检查选项
if qo == nil {
qo = &QueryOption{}
}
//分析查询的字段
fields, footFields := m.ParseFields(qo)
if fields == nil || len(fields) <= 0 {
return
}
//分析查询条件
theDB := m.ParseWhere(qo.DB, qo.ExtraWhere, qo.Values, qo.NotSearch)
//排序
if qo.Order != "" {
theDB.Order(qo.Order)
} else if m.attr.Order != "" {
theDB.Order(m.attr.Order)
}
//分页信息
offset, limit := getOffsetLimit(qo.Page, qo.PageSize)
//查询
desc = make([]map[string]interface{}, 0)
db := theDB.Session(&gorm.Session{})
db.Offset(offset).Limit(limit).Select(fields).Find(&desc)
if !qo.NotTotal {
db = theDB.Session(&gorm.Session{})
db.Count(&total)
}
if theDB.Error != nil {
err = theDB.Error
return
}
//汇总
if !qo.NotFoot && footFields != nil && len(footFields) > 0 {
foot = make(map[string]interface{})
if err = theDB.Select(footFields).Offset(0).Limit(1).Take(&foot).Error; err != nil {
return
}
}
err = m.ProcessData(desc, qo)
return
}
// 判断是否已有重复数据
func (m *Model) CheckUnique(data map[string]interface{}, oldPkValue interface{})(err error) {
//如果没有设置唯一字段,且主键是自增时,直接返回不重复
if (m.attr.UniqueFields == nil || len(m.attr.UniqueFields) <= 0) && m.attr.AutoInc {
return
}
db := m.BaseDB(true)
pk := m.FieldAddAlias(m.attr.Pk)
fileTitles := make([]string, 0)
if oldPkValue != nil {
db.Where(fmt.Sprintf("%s <> ?", pk), oldPkValue)
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[pk]].Title)
}
where := ""
whereValue := make([]interface{}, 0)
//检查唯一字段
for _, field := range m.attr.UniqueFields {
if where == "" {
where += fmt.Sprintf(" %s = ?", m.FieldAddAlias(field))
} else {
where += fmt.Sprintf(" AND %s = ?", m.FieldAddAlias(field))
}
whereValue = append(whereValue, data[field])
fileTitles = append(fileTitles, m.attr.Fields[m.attr.fieldIndexMap[field]].Title)
}
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if where == "" {
where = fmt.Sprintf("%s = ?", pk)
} else {
where = fmt.Sprintf("( %s ) OR ( %s )", where, fmt.Sprintf("%s = ?", pk))
}
whereValue = append(whereValue, data[m.attr.Pk])
}
db.Where(where, whereValue...)
var total int64
if err := db.Count(&total).Error; err != nil {
return err
} else if total > 0 {
return &Result{Message:fmt.Sprintf("记录已存在:【%s】存在重复", strings.Join(fileTitles, "、"))}
}
return nil
}
// 检查必填字段
func (m *Model) CheckRequiredValues(data map[string]interface{}) (err error) {
fieldTitles := make([]string, 0)
//非自增PK表,检查PK字段
if !m.attr.AutoInc {
if cast.ToString(data[m.attr.Pk]) == "" {
fieldTitles = append(fieldTitles, m.attr.Fields[m.attr.fieldIndexMap[m.attr.Pk]].Title)
}
}
//检查配置中的必填字段
for _, field := range m.attr.Fields {
if !field.Required {
continue
}
if cast.ToString(data[field.Name]) == "" {
fieldTitles = append(fieldTitles, field.Title)
}
}
if len(fieldTitles) > 0 {
return &Result{Message:fmt.Sprintf("【%s】 字段为必填项", strings.Join(fieldTitles, "、"))}
}
return
}
// 更新记录
func (m *Model) Updates(data map[string]interface{}, oldPkValue interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, oldPkValue); err != nil {
return
}
//更新数据
db := m.BaseDB(false)
db.Where(fmt.Sprintf("`%s` = ?", m.attr.Pk), oldPkValue).Updates(data)
return db.RowsAffected, db.Error
}
// 创建记录
func (m *Model) Create(data map[string]interface{}) (rowsAffected int64, err error) {
//检查必填项
if err = m.CheckRequiredValues(data); err != nil {
return
}
//检查重复记录
if err = m.CheckUnique(data, nil); err != nil {
return
}
//创建数据
db := m.BaseDB(false).Create(data)
return db.RowsAffected, db.Error
}
//保存记录(根据pk自动分析是update 或 create)
func (m *Model) Save(data map[string]interface{}, oldPkValue interface{})(rowsAffected int64, err error) {
if oldPkValue == nil { //创建
return m.Create(data)
} else { //更新
return m.Updates(data, oldPkValue)
}
}
//根据PK字段删除记录
func (m *Model) Delete(id interface{}) (rowsAffected int64, err error) {
var delIds interface{}
kind := reflect.TypeOf(id).Kind()
symbol := ""
if kind == reflect.Array || kind == reflect.Slice {
symbol = "IN"
delIds = id
} else {
symbol = "="
delIds = []interface{}{id}
}
db := m.BaseDB(false).Where(fmt.Sprintf("`%s` %s ?", m.attr.Pk, symbol), delIds).Delete(nil)
return db.RowsAffected, db.Error
}
// 分析查询条件 (此批条件只作用于返回的db对象上,不会作用于模型的db上)
// @param extraWhere 额外的查询条件
// @param searchValues 查询字段值
// @param notSearch 是否使用查询字段条件
func (m *Model) ParseWhere(db *gorm.DB, extraWhere []interface{}, searchValues map[string]interface{}, notSearch bool) *gorm.DB {
var theDB *gorm.DB
if db == nil {
theDB = m.NewDB()
} else {
theDB = db.Where("")
}
//额外的查询条件
if extraWhere != nil {
theDB.Where(extraWhere[0], extraWhere[1:]...)
}
// 模型各查询字段
if !notSearch {
searchValues = m.ParseSearchValues(searchValues)
for _, f := range m.attr.SearchFields {
// 该查询字段未带条件配置 或 未传值,跳过
if _, ok := searchValues[f.Name]; !ok {
continue
}
if f.Where == "" {
f.Where = fmt.Sprintf("%s = ?", m.FieldAddAlias(f.Name))
f.Values = []string{"?"}
}
// 查询值与查询条件匹配
values := make([]interface{}, 0)
if f.Between { //范围值
vType := reflect.TypeOf(searchValues[f.Name]).Kind()
var vs []string
if vType == reflect.Array || vType == reflect.Slice {
vs = searchValues[f.Name].([]string)
} else {
vs = strings.Split(cast.ToString(searchValues[f.Name]), f.BetweenSep)
}
for i, v := range f.Values {
if v == "?" {
values = append(values, vs[i])
} else {
values = append(values, strings.ReplaceAll(v, "?", vs[i]))
}
}
} else { //单个值
for _, v := range f.Values {
if v == "?" {
values = append(values, searchValues[f.Name])
} else {
values = append(values, strings.ReplaceAll(v, "?", cast.ToString(searchValues[f.Name])))
}
}
}
theDB.Where(f.Where, values...)
}
}
//受行权限控制的字段进行数据权限过滤
for fieldName, fromInfo := range m.attr.rowAuthFieldMap {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(fromInfo.FromName); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(fieldName)), rowAuth)
}
}
//如果自身也是行权限模型,则进行本身数据权限过滤
if m.attr.isRowAuth {
if rowAuth, isAllAuth := option.ModelAuth.GetRowAuthCallback(m.attr.Name); !isAllAuth {
theDB.Where(fmt.Sprintf("%s IN ?", m.FieldAddAlias(m.attr.Pk)), rowAuth)
}
}
return theDB
}
//分析查询字段
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的字段名数组
// @return footFields 汇总字段
func (m *Model) ParseFields(qo *QueryOption)(fields []string,footFields []string) {
fields = make([]string, 0)
footFields = make([]string, 0)
//扩展字段
fields = append(fields, m.FieldsAddAlias(qo.ExtraFields)...)
// 树型必备字段
if m.attr.IsTree {
fields = append(fields, m.ParseTreeExtraField()...)
}
var modelFields []*ModelField
if strings.ToLower(qo.useModelFiledType) == "edit" {
modelFields = m.attr.editFields
}else{
modelFields = m.attr.listFields
}
for _, field := range modelFields {
//基础字段
fieldName := ""
if field.Alias == "" {
fieldName = m.FieldAddAlias(field.Name)
} else if field.Alias != "" {
fieldName = fmt.Sprintf("%s AS %s", field.Alias, field.Name)
}
fields = append(fields, fieldName)
//汇总字段
if field.Foot != "" {
footFields = append(footFields, fmt.Sprintf("%s AS %s", field.Foot, field.Name))
}
}
return
}
// 分析kv字段数组 (仅对通过NewConfigModel创建的模型有效)
// @param kvName kv配置项名
// @param extraFields 额外附加的字段
// @return fields 最终需要查询的KV字段名数组
func (m *Model) ParseKvFields(kvName string, extraFields []string) (fields []string) {
fields = make([]string, 0)
// kv配置中的字段
kv, ok := ModelKv{}, false
if kv, ok = m.attr.Kvs[kvName]; !ok {
return
}
//keySep := fmt.Sprintf(",'%s',", kv.KeySep)
//valueSep := fmt.Sprintf(",'%s',", kv.ValueSep)
keyField := fmt.Sprintf("%s AS `__mc_key`", m.FieldAddAlias(kv.KeyField))
valueField := fmt.Sprintf("%s AS `__mc_value`", m.FieldAddAlias(kv.ValueField))
fields = append(fields, keyField, valueField)
// 树型必备字段
if m.attr.IsTree {
treePathField := m.FieldAddAlias(m.attr.Tree.PathField)
fields = append(append(fields, treePathField), m.ParseTreeExtraField()...)
}
// 附加字段
if extraFields != nil {
fields = append(fields, m.FieldsAddAlias(extraFields)...)
}
return
}
// 给字段加表别名
func (m *Model) FieldAddAlias(field string) string {
if field == "" {
return ""
}
if strings.Contains(field, ".") || strings.Contains(field, "(") {
return field
} else {
return fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(field, " "))
}
}
// 给字段数组加表别名
func (m *Model) FieldsAddAlias(fields []string) []string {
newFields := make([]string, 0)
for _, v := range fields {
if v == "" {
continue
}
if strings.Contains(v, ".") || strings.Contains(v, "(") {
newFields = append(newFields, v)
} else {
newFields = append(newFields, fmt.Sprintf("`%s`.`%s`", m.attr.Alias, strings.Trim(v, " ")))
}
}
return newFields
}
// 对查询的数据进行处理
func (m *Model) ProcessData(data []map[string]interface{}, qo *QueryOption)(err error) {
if data == nil || len(data) <= 0 {
return
}
//序号
if m.attr.Number {
for i, _ := range data {
data[i]["__mc_index"] = (qo.Page -1) * qo.PageSize + i + 1
}
}
//转换成from值
if !qo.NotConvertFromValue {
for _, f := range m.attr.Fields {
if _, ok := data[0][f.Name]; !ok {
continue
}
if f.From != "" {
enum := m.GetFromKvs(f.From)
for i, _ := range data {
if qo.AttachFromRealValue { //附加字段原值真实值
data[i]["__mc_"+f.Name] = data[i][f.Name]
}
vString := cast.ToString(data[i][f.Name]) //字段值
if f.Multiple { //多选
vs := strings.Split(vString, f.Separator)
newVs := make([]string, 0)
for _, v := range vs {
newVs = append(newVs, cast.ToString(enum[v]["__mc_value"]))
}
data[i][f.Name] = strings.Join(newVs, f.Separator)
} else { //单选
data[i][f.Name] = cast.ToString(enum[vStr | lue"])
}
}
}
}
}
//树形
indent := ""
if qo.TreeIndent == nil {
indent = m.attr.Tree.Indent
} else {
indent = *qo.TreeIndent
}
if m.attr.IsTree && indent != "" { //树形名称字段加前缀
for i, _ := range data {
data[i][m.attr.Tree.NameField] = nString(indent, cast.ToInt(data[i]["__mc_level"])-1) + cast.ToString(data[i][m.attr.Tree.NameField])
}
}
return
}
// 分析树形结构查询必须的扩展字段
func (m *Model) ParseTreeExtraField() (field []string) {
pathField := m.FieldAddAlias(m.attr.Tree.PathField)
__mc_pathField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Tree.PathField)
__mc_pkField := fmt.Sprintf("`__mc_%s`.`%s`", m.attr.Table, m.attr.Pk)
field = make([]string, 3)
//层级字段
field[0] = fmt.Sprintf("CEILING(LENGTH(%s)/%d) AS `__mc_level`", pathField, m.attr.Tree.PathBit)
//父节点字段
field[1] = fmt.Sprintf("(SELECT %s FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_parent`",
__mc_pkField, m.attr.Table, m.attr.Table, __mc_pathField, pathField, pathField, m.attr.Tree.PathBit)
//字节点数字段
field[2] = fmt.Sprintf("(SELECT count(%s) FROM `%s` AS `__mc_%s` WHERE %s=LEFT(%s, LENGTH(%s)-%d) LIMIT 1) AS `__mc_child_count`",
__mc_pkField, m.attr.Table, m.attr.Table, pathField, __mc_pathField, __mc_pathField, m.attr.Tree.PathBit)
return
} | ing]["__mc_va | identifier_name |
script.padrao.js | /**
* Esse script tem dependencia das seguintes bibliotecas:
*
* class.padrao.js
* style.padrao.js
*
* Terceiros:
*
* Bootstrap 5.1 ou superior.
* ChartJS;.
*
* */
const url = new URL(document.URL);
const urlHost = `${url.protocol}//${url.host}`;
const urlAPI = `${urlHost}/api/`;
//const urlAPI = `https://intru.herokuapp.com/`;
//const urlAPI = `https://localhost:44382/`;
//Cores primarias
const colorPrymary1Hex = "#02DDE8";
const colorPrymary2Hex = "#0297F2";
const colorPrymary3Hex = "#0944DB";
const colorPrymary4Hex = "#0F02F2";
const colorPrymary5Hex = "#5D00EB";
//Cores primarias
const colorPrymary1Rgb = "2, 221, 232";
const colorPrymary2Rgb = "2, 151, 242";
const colorPrymary3Rgb = "9, 68, 219";
const colorPrymary4Rgb = "15, 2, 242";
const colorPrymary5Rgb = "93, 0, 235";
document.addEventListener('DOMContentLoaded', () => {
var URLHOST = new URL(window.location.href);
var ext = URLHOST.host.indexOf('intru') > -1;
if (ext != null && ext != undefined) {
if (ext != true) {
return;
}
}
var cod = recuperaUserCodCookie();
if (cod == '' || cod == null || cod == undefined) {
if (window.location.href == `${urlHost}/Security/Login/`) {
return;
}
logOut();
}
})
const Grafico = {
/**
* Realiza a configuração do gráfico (não faz a renderização, apenas configura, utilize o Grafico.NewChart para renderizar um grafico, passando essa função como parâmetro)
* @param {string} typeChar Tipo do grafico a ser renderizado
* @param {object} pdata Um objeto do tipo ChartData contendo dos dados a ser exibidos
* @param {object} plabels Um objeto contendo as labels do grafico
* @returns void
*/
ConfigChart: (typeChar, pChartData, plabels) => {
try {
var labels = plabels != undefined && plabels != null ? plabels : [
'Empty'
];
var data = pChartData != undefined && pChartData != null ? pChartData : {
labels: labels,
datasets: [{
label: 'Dataset',
backgroundColor: `rgb(${colorPrymary1Rgb})`,
borderWidth: 2,
borderColor: `rgb(${colorPrymary2Rgb})`,
data: [0]
}]
};
var config = {
type: typeChar != undefined && typeChar != null ? (typeChar).toString() : 'line',
data,
options: {
title: {
display: true,
fontSize: 20,
text: "Relatorio"
}
}
};
return config;
} catch (erro) {
alert(erro);
}
},
/**
* Essa função faz a renderização de um grafico utilizando a tag canvas
* @param {*} configChart Um objeto configuravel retornado pela função Grafico.ConfigChart
* @param {} canvasId Id do canvas onde o grafico sera renderizado
* @returns void
*/
NewChart: (configChart, canvasId) => {
try {
if (configChart == undefined || configChart == null &&
canvasId == undefined || canvasId == null) {
return false;
}
var chart = new Chart(document.getElementById(canvasId.toString()),
configChart);
} catch (error) {
alert(error);
}
}
}
/**
* Função responsável pelas requisições para a API.
* @param {AjaxOptions} pOptions Objeto contendo a configuração da requisição.
* @param {any} asyncRequest Um booleano indicando se a requisição é assincrona, o valor padrão é true
*/
const Ajax = (pOptions, asyncRequest = true) => {
try {
if (pOptions == undefined || pOptions == null) {
return false;
}
if (typeof (asyncRequest) != "boolean") {
return false;
}
var http = new XMLHttpRequest();
var options = AjaxOptions;
options.onload = pOptions.onload != undefined && pOptions.onload != null ? pOptions.onload : AjaxOptions.onload;
options.onerror = pOptions.onerror != undefined && pOptions.onerror != null ? pOptions.onerror : AjaxOptions.onerror;
options.responseType = pOptions.responseType != undefined && pOptions.responseType != null ? pOptions.responseType : AjaxOptions.responseType;
options.method = pOptions.method;
options.url = pOptions.url;
if (pOptions.setRequestHeader != undefined && pOptions.setRequestHeader != null) {
options.setRequestHeader.name = pOptions.setRequestHeader.name != undefined && pOptions.setRequestHeader.name != null ? pOptions.setRequestHeader.name : AjaxOptions.setRequestHeader.name;
options.setRequestHeader.value = pOptions.setRequestHeader.value != undefined && pOptions.setRequestHeader.value != null ? pOptions.setRequestHeader.value : AjaxOptions.setRequestHeader.value;
}
http.open(options.method, options.url, asyncRequest);
http.responseType = options.responseType;
http.setRequestHeader(options.setRequestHeader.name, options.setRequestHeader.value);
http.onload = options.onload;
http.onerror = options.onerror;
http.onloadstart = pOptions.onloadstart != undefined && pOptions.onloadstart != null ? pOptions.onloadstart : AjaxOptions.onloadstart;
http.onloadend = pOptions.onloadend != undefined && pOptions.onloadend != null ? pOptions.onloadend : AjaxOptions.onloadend;
if (pOptions.data != null && pOptions.data != undefined) {
var data = options.setRequestHeader.value == 'application/json' ? JSON.stringify(options.data) : encodeURI(options.data);
http.send(data);
} else {
http.send();
| alert(error);
}
}
const API = {
/**
*Requisições do tipo GET
* @param {Opções para a definição da requisição} options
*/
GET: (options) => {
try {
if (options == undefined || options == null) {
return false;
}
Ajax(options);
} catch (error) {
alert(error);
}
},
/**
*
* @param {Opções para a definição da requisição} options
*/
POST: (options) => {
try {
Ajax(options);
} catch (error) {
alert(error);
}
}
}
const Elements = {
/**
* Retorna um elemento configurado por parametro.
* @param {any} type Tipo do elemento: div, button, input, label, canvas, p, h1, etc.
* @param {any} id
* @param {any} name
* @param {any} classe
* @param {any} style
* @param {any} onchange
*/
Create: (type = null, id = '', name = '', classe = null, style = null, classList = null, onchange = null) => {
try {
if (type == null && type == undefined) {
return null;
}
var element = document.createElement(type);
if (onchange != null, onchange != undefined) {
element.addEventListener('change', onchange);
}
if (style != null && style != undefined) {
element.style = style;
}
element.id = id;
if (classList != null && Array.isArray(classList)) {
for (i = 0; i < classList.length; i++) {
element.classList.add(classList[i]);
}
} else if (classe != null) {
element.classList.add(classe);
}
element.name = name;
return element;
} catch (error) {
console.log(error);
}
},
/**
* Renderiza um elemento de load na tela
* @param {any} type Tipo do load que sera renderizado, o tipo padrão é "Spin" (Spin, Growing)
* @param {any} idElement Id do elemento onde o load sera renderizado
* @param {any} style Tipo de cor ()
*/
Load: {
Create: (type, idElement, strStyle, strSpinnerColor) => {
try {
var span;
var div;
var spinnerColor = strSpinnerColor == null || strSpinnerColor == undefined ? "var(--colorPrymary5)" : strSpinnerColor;
var style = strStyle == null || strStyle == undefined ? `z-index: 999 !important; position: fixed !important; left: 50%;`
+ `top: 50% !important; bottom: 0 !important; color: ${spinnerColor} !important;` : strStyle;
switch (type) {
case "Spin":
div = Elements.Create('div', 'load', null, null, style, ["spinner-border", "text-primary"]);
span = Elements.Create('span', 'loadSpan', "visually-hidden", null);
div.appendChild(span);
break;
case "Growing":
div = Elements.Create('div', 'loadMestre', null, null, style);
var divSpinner = Elements.Create('div', 'divGrowing', null, null, `z-index: 150 !important; color: ${spinnerColor} !important;`, ["spinner-grow", "text-primary"]);
span = Elements.Create('span', 'loadGrowing', "visually-hidden", null);
divSpinner.appendChild(span);
div.appendChild(divSpinner);
break;
default:
div = Elements.Create('div', 'load', null, null, style, ["spinner-border", "text-primary"]);
span = Elements.Create('span', 'loadSpan', null, "visually-hidden");
div.appendChild(span);
break;
}
if (idElement == null || idElement == undefined) {
document.body.appendChild(div);
} else {
document.getElementById(idElement).appendChild(div);
}
} catch (error) {
console.log(error);
}
},
LoadRemove: (load) => {
try {
if (load == null || load == undefined) {
alert("O Load não pode ser nulo");
return
}
var ld = document.getElementById(load);
if (ld != null && ld != undefined) {
ld.remove();
}
} catch (error) {
console.log(error);
}
}
},
Message: {
Remove: () => {
document.querySelectorAll('.messageError').forEach((obj, index) => { obj.remove(); });
document.querySelectorAll('.messageSuccess').forEach((obj, index) => { obj.remove(); });
},
Success: (msg) => {
try {
Message.Remove();
} catch (error) { }
var div = Scripts.Elements.Create('div', null, 'message', 'messageSuccess');
var divContent = Scripts.Elements.Create('div', 'messageContent', null, 'messageContent');
var span = Scripts.Elements.Create('span');
// var iconTrash = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-eraser']);
var icon = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-check', 'iconMessage']);
span.textContent = msg;
//span.appendChild(iconTrash);
divContent.appendChild(span);
div.appendChild(divContent);
div.appendChild(icon);
document.body.appendChild(div);
},
Error: (msg) => {
try {
Message.Remove();
} catch (error) { }
var div = Scripts.Elements.Create('div', null, 'message', 'messageError');
var divContent = Scripts.Elements.Create('div', 'messageContent', null, 'messageContent');
var span = Scripts.Elements.Create('span');
// var iconTrash = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-eraser']);
var icon = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-bug', 'iconMessage']);
span.textContent = msg;
//span.appendChild(iconTrash);
divContent.appendChild(span);
div.appendChild(divContent);
div.appendChild(icon);
document.body.appendChild(div);
}
}
}
const Scripts = {
Grafico: Grafico,
API: API,
Elements: Elements
}
var logOut = function () {
Scripts.Elements.Message.Error("Redirecionando para o login...");
document.cookie = `username=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
document.cookie = `usercod=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
setTimeout(() => { window.location.href = `${urlHost}/Security/Login/`; }, 3000);
}
var recuperaUserNameCookie = function () {
try {
var cookie = document.cookie.split(';');
var name = '';
if (cookie.find(x => x.indexOf('username=') > -1) != undefined) {
name = cookie.find(x => x.indexOf('username=') > -1).replaceAll('username=', '')
}
return name;
} catch (error) {
return null;
}
};
var recuperaUserCodCookie = function () {
try {
var cookie = document.cookie.split(';');
var cod = '';
if (cookie.find(x => x.indexOf('usercod=') > -1) != undefined) {
cod = parseInt(cookie.find(x => x.indexOf('usercod=') > -1).replaceAll('usercod=', ''));
}
return cod;
} catch (error) {
return null;
}
};
//Métodos nativos reescritos
Number.prototype.toLocaleBR = (number) => {
return number.toLocaleString('pt-BR', { minimumFractionDigits: 2, style: 'currency', currency: 'BRL' });
} | }
} catch (error) {
| conditional_block |
script.padrao.js | /**
* Esse script tem dependencia das seguintes bibliotecas:
*
* class.padrao.js
* style.padrao.js
*
* Terceiros:
*
* Bootstrap 5.1 ou superior.
* ChartJS;.
*
* */
const url = new URL(document.URL);
const urlHost = `${url.protocol}//${url.host}`;
const urlAPI = `${urlHost}/api/`;
//const urlAPI = `https://intru.herokuapp.com/`;
//const urlAPI = `https://localhost:44382/`;
//Cores primarias
const colorPrymary1Hex = "#02DDE8";
const colorPrymary2Hex = "#0297F2";
const colorPrymary3Hex = "#0944DB";
const colorPrymary4Hex = "#0F02F2";
const colorPrymary5Hex = "#5D00EB";
//Cores primarias
const colorPrymary1Rgb = "2, 221, 232";
const colorPrymary2Rgb = "2, 151, 242";
const colorPrymary3Rgb = "9, 68, 219";
const colorPrymary4Rgb = "15, 2, 242";
const colorPrymary5Rgb = "93, 0, 235";
document.addEventListener('DOMContentLoaded', () => {
var URLHOST = new URL(window.location.href);
var ext = URLHOST.host.indexOf('intru') > -1;
if (ext != null && ext != undefined) {
if (ext != true) {
return;
}
}
var cod = recuperaUserCodCookie();
if (cod == '' || cod == null || cod == undefined) {
if (window.location.href == `${urlHost}/Security/Login/`) {
return;
}
logOut();
}
})
const Grafico = {
/**
* Realiza a configuração do gráfico (não faz a renderização, apenas configura, utilize o Grafico.NewChart para renderizar um grafico, passando essa função como parâmetro)
* @param {string} typeChar Tipo do grafico a ser renderizado
* @param {object} pdata Um objeto do tipo ChartData contendo dos dados a ser exibidos
* @param {object} plabels Um objeto contendo as labels do grafico
* @returns void
*/
ConfigChart: (typeChar, pChartData, plabels) => {
try {
var labels = plabels != undefined && plabels != null ? plabels : [
'Empty'
];
var data = pChartData != undefined && pChartData != null ? pChartData : {
labels: labels,
datasets: [{
label: 'Dataset',
backgroundColor: `rgb(${colorPrymary1Rgb})`,
borderWidth: 2,
borderColor: `rgb(${colorPrymary2Rgb})`,
data: [0]
}]
};
var config = {
type: typeChar != undefined && typeChar != null ? (typeChar).toString() : 'line',
data,
options: {
title: {
display: true,
fontSize: 20,
text: "Relatorio"
}
}
};
return config;
} catch (erro) {
alert(erro);
}
},
/**
* Essa função faz a renderização de um grafico utilizando a tag canvas
* @param {*} configChart Um objeto configuravel retornado pela função Grafico.ConfigChart
* @param {} canvasId Id do canvas onde o grafico sera renderizado
* @returns void
*/
NewChart: (configChart, canvasId) => {
try {
if (configChart == undefined || configChart == null &&
canvasId == undefined || canvasId == null) {
return false;
}
var chart = new Chart(document.getElementById(canvasId.toString()),
configChart);
} catch (error) {
alert(error);
}
}
}
/**
* Função responsável pelas requisições para a API.
* @param {AjaxOptions} pOptions Objeto contendo a configuração da requisição.
* @param {any} asyncRequest Um booleano indicando se a requisição é assincrona, o valor padrão é true
*/
const Ajax = (pOptions, asyncRequest = true) => {
try {
if (pOptions == undefined || pOptions == null) {
return false;
}
if (typeof (asyncRequest) != "boolean") {
return false;
}
var http = new XMLHttpRequest();
var options = AjaxOptions;
options.onload = pOptions.onload != undefined && pOptions.onload != null ? pOptions.onload : AjaxOptions.onload;
options.onerror = pOptions.onerror != undefined && pOptions.onerror != null ? pOptions.onerror : AjaxOptions.onerror;
options.responseType = pOptions.responseType != undefined && pOptions.responseType != null ? pOptions.responseType : AjaxOptions.responseType;
options.method = pOptions.method;
options.url = pOptions.url;
if (pOptions.setRequestHeader != undefined && pOptions.setRequestHeader != null) {
options.setRequestHeader.name = pOptions.setRequestHeader.name != undefined && pOptions.setRequestHeader.name != null ? pOptions.setRequestHeader.name : AjaxOptions.setRequestHeader.name;
options.setRequestHeader.value = pOptions.setRequestHeader.value != undefined && pOptions.setRequestHeader.value != null ? pOptions.setRequestHeader.value : AjaxOptions.setRequestHeader.value;
}
http.open(options.method, options.url, asyncRequest);
http.responseType = options.responseType;
http.setRequestHeader(options.setRequestHeader.name, options.setRequestHeader.value);
http.onload = options.onload;
http.onerror = options.onerror;
http.onloadstart = pOptions.onloadstart != undefined && pOptions.onloadstart != null ? pOptions.onloadstart : AjaxOptions.onloadstart;
http.onloadend = pOptions.onloadend != undefined && pOptions.onloadend != null ? pOptions.onloadend : AjaxOptions.onloadend;
if (pOptions.data != null && pOptions.data != undefined) {
var data = options.setRequestHeader.value == 'application/json' ? JSON.stringify(options.data) : encodeURI(options.data);
http.send(data);
} else {
http.send();
}
} catch (error) {
alert(error);
}
}
const API = {
/**
*Requisições do tipo GET
* @param {Opções para a definição da requisição} options
*/
GET: (options) => {
try {
if (options == undefined || options == null) {
return false;
}
Ajax(options);
} catch (error) {
alert(error);
}
},
/**
*
* @param {Opções para a definição da requisição} options
*/
POST: (options) => {
try {
Ajax(options);
} catch (error) {
alert(error);
}
}
}
const Elements = {
/**
* Retorna um elemento configurado por parametro.
* @param {any} type Tipo do elemento: div, button, input, label, canvas, p, h1, etc.
* @param {any} id
* @param {any} name
* @param {any} classe
* @param {any} style
* @param {any} onchange
*/
Create: (type = null, id = '', name = '', classe = null, style = null, classList = null, onchange = null) => {
try {
if (type == null && type == undefined) {
return null;
}
var element = document.createElement(type);
if (onchange != null, onchange != undefined) {
element.addEventListener('change', onchange);
}
if (style != null && style != undefined) {
element.style = style;
}
element.id = id;
if (classList != null && Array.isArray(classList)) {
for (i = 0; i < classList.length; i++) {
element.classList.add(classList[i]);
}
} else if (classe != null) {
element.classList.add(classe);
}
element.name = name;
return element;
} catch (error) {
console.log(error);
}
},
/**
* Renderiza um elemento de load na tela
* @param {any} type Tipo do load que sera renderizado, o tipo padrão é "Spin" (Spin, Growing)
* @param {any} idElement Id do elemento onde o load sera renderizado
* @param {any} style Tipo de cor ()
*/
Load: {
Create: (type, idElement, strStyle, strSpinnerColor) => {
try {
var span;
var div;
var spinnerColor = strSpinnerColor == null || strSpinnerColor == undefined ? "var(--colorPrymary5)" : strSpinnerColor;
var style = strStyle == null || strStyle == undefined ? `z-index: 999 !important; position: fixed !important; left: 50%;`
+ `top: 50% !important; bottom: 0 !important; color: ${spinnerColor} !important;` : strStyle;
switch (type) {
case "Spin":
div = Elements.Create('div', 'load', null, null, style, ["spinner-border", "text-primary"]);
span = Elements.Create('span', 'loadSpan', "visually-hidden", null);
div.appendChild(span);
break;
case "Growing": | var divSpinner = Elements.Create('div', 'divGrowing', null, null, `z-index: 150 !important; color: ${spinnerColor} !important;`, ["spinner-grow", "text-primary"]);
span = Elements.Create('span', 'loadGrowing', "visually-hidden", null);
divSpinner.appendChild(span);
div.appendChild(divSpinner);
break;
default:
div = Elements.Create('div', 'load', null, null, style, ["spinner-border", "text-primary"]);
span = Elements.Create('span', 'loadSpan', null, "visually-hidden");
div.appendChild(span);
break;
}
if (idElement == null || idElement == undefined) {
document.body.appendChild(div);
} else {
document.getElementById(idElement).appendChild(div);
}
} catch (error) {
console.log(error);
}
},
LoadRemove: (load) => {
try {
if (load == null || load == undefined) {
alert("O Load não pode ser nulo");
return
}
var ld = document.getElementById(load);
if (ld != null && ld != undefined) {
ld.remove();
}
} catch (error) {
console.log(error);
}
}
},
Message: {
Remove: () => {
document.querySelectorAll('.messageError').forEach((obj, index) => { obj.remove(); });
document.querySelectorAll('.messageSuccess').forEach((obj, index) => { obj.remove(); });
},
Success: (msg) => {
try {
Message.Remove();
} catch (error) { }
var div = Scripts.Elements.Create('div', null, 'message', 'messageSuccess');
var divContent = Scripts.Elements.Create('div', 'messageContent', null, 'messageContent');
var span = Scripts.Elements.Create('span');
// var iconTrash = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-eraser']);
var icon = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-check', 'iconMessage']);
span.textContent = msg;
//span.appendChild(iconTrash);
divContent.appendChild(span);
div.appendChild(divContent);
div.appendChild(icon);
document.body.appendChild(div);
},
Error: (msg) => {
try {
Message.Remove();
} catch (error) { }
var div = Scripts.Elements.Create('div', null, 'message', 'messageError');
var divContent = Scripts.Elements.Create('div', 'messageContent', null, 'messageContent');
var span = Scripts.Elements.Create('span');
// var iconTrash = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-eraser']);
var icon = Scripts.Elements.Create('i', null, null, null, null, ['fas', 'fa-bug', 'iconMessage']);
span.textContent = msg;
//span.appendChild(iconTrash);
divContent.appendChild(span);
div.appendChild(divContent);
div.appendChild(icon);
document.body.appendChild(div);
}
}
}
const Scripts = {
Grafico: Grafico,
API: API,
Elements: Elements
}
var logOut = function () {
Scripts.Elements.Message.Error("Redirecionando para o login...");
document.cookie = `username=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
document.cookie = `usercod=; expires=Thu, 01 Jan 1970 00:00:00 UTC; path=/;`;
setTimeout(() => { window.location.href = `${urlHost}/Security/Login/`; }, 3000);
}
var recuperaUserNameCookie = function () {
try {
var cookie = document.cookie.split(';');
var name = '';
if (cookie.find(x => x.indexOf('username=') > -1) != undefined) {
name = cookie.find(x => x.indexOf('username=') > -1).replaceAll('username=', '')
}
return name;
} catch (error) {
return null;
}
};
var recuperaUserCodCookie = function () {
try {
var cookie = document.cookie.split(';');
var cod = '';
if (cookie.find(x => x.indexOf('usercod=') > -1) != undefined) {
cod = parseInt(cookie.find(x => x.indexOf('usercod=') > -1).replaceAll('usercod=', ''));
}
return cod;
} catch (error) {
return null;
}
};
//Métodos nativos reescritos
Number.prototype.toLocaleBR = (number) => {
return number.toLocaleString('pt-BR', { minimumFractionDigits: 2, style: 'currency', currency: 'BRL' });
} | div = Elements.Create('div', 'loadMestre', null, null, style);
| random_line_split |
mod.rs |
use rstd::prelude::*;
use codec::{Encode, Decode};
use support::{
StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure,
traits::{
Currency, ReservableCurrency,
OnFreeBalanceZero, OnUnbalanced,
WithdrawReason, ExistenceRequirement,
Imbalance, Get,
},
dispatch::Result,
};
use sr_primitives::{
transaction_validity::{
TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError,
TransactionValidity,
},
traits::{
Zero, CheckedAdd, CheckedSub,
Saturating, SignedExtension, SaturatedConversion, Convert,
},
weights::{DispatchInfo, SimpleDispatchInfo, Weight},
};
use system::{OnNewAccount, ensure_signed};
use crate::non_transfer_asset::SustainableCurrency;
/// Trait for activity
pub trait ActivityInterface<AccountId, Balance> {
fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result;
}
/// The module's configuration trait.
pub trait Trait: system::Trait {
/// Currency type for this module.
type Currency: ReservableCurrency<Self::AccountId>;
/// Energy type for this module
type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>;
/// Action point type for this module
type ActivityCurrency: Currency<Self::AccountId>;
/// Reputation point type for this module
type ReputationCurrency: Currency<Self::AccountId>;
/// Handler for the unbalanced reduction when taking transaction fees.
type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>;
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
/// The fee to be paid for making a transaction; the base.
type TransactionBaseFee: Get<BalanceOf<Self>>;
/// The fee to be paid for making a transaction; the per-byte portion.
type TransactionByteFee: Get<BalanceOf<Self>>;
/// The base Energy amount of activated account
type EnergyBaseAmount: Get<EnergyOf<Self>>;
/// Convert a weight value into a deductible fee based on the currency type.
type WeightToFee: Convert<Weight, BalanceOf<Self>>;
/// Convert a fee value to energy point
type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert a charging value to energy point
type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert an energy point to fee value
type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>;
/// Convert an energy point to locking block number
type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>;
/// Convert an energy point to action point
type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>;
/// Convert an action point to reputation
type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>;
}
// Balance zone
pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance;
type NegativeImbalanceOf<T> =
<<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance;
// Energy zone
pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Action zone
pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Reputation zone
pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// This module's storage items.
decl_storage! {
trait Store for Module<T: Trait> as Activities {
/// Map from all extend
pub Charged get(charged): map T::AccountId => BalanceOf<T>;
}
}
decl_event!(
pub enum Event<T> where
AccountId = <T as system::Trait>::AccountId,
Balance = BalanceOf<T>,
Energy = EnergyOf<T>,
Reputation = ReputationOf<T>
{
// Fee payment
FeePayed(AccountId, Energy, Balance),
EnergyRecovered(AccountId, Energy),
// Reputation part
ReputationReward(AccountId, Reputation),
ReputationSlash(AccountId, Reputation),
}
);
// The module's dispatchable functions.
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initializing events
fn deposit_event() = default;
/// Bond to increase Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn charge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::charge_for_energy(&who, value)?;
}
/// UnBond to decrease Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn discharge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::discharge_for_energy(&who, value)?;
}
}
}
// The module's main implement
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
pub fn available_energy(who: &T::AccountId) -> EnergyOf<T> {
T::EnergyCurrency::available_free_balance(who)
}
// PRIVATE MUTABLES
fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// ensure reserve
if !T::Currency::can_reserve(who, value) {
return Err("not enough free funds");
}
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?;
let energy_to_charge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?;
// MUTABLES
T::Currency::reserve(who, value)?;
T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?;
<Charged<T>>::insert(who, new_charged);
Ok(())
}
fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?;
let energy_to_discharge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?;
// MUTABLES
T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::Currency::unreserve(who, value);
<Charged<T>>::insert(who, new_charged);
Ok(())
}
}
impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> {
// Implementation of the config type managing the creation of new accounts.
fn on_new_account(who: &T::AccountId) {
T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get());
}
}
impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> {
fn on_free_balance_zero(who: &T::AccountId) {
let dust = <Charged<T>>::take(who);
if !dust.is_zero() {
T::Currency::unreserve(who, dust);
}
T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who));
}
}
impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> {
// do admire
fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result {
let earned_rp = T::ActionPointToReputation::convert(cap.clone());
ensure!(!earned_rp.is_zero(), "action point too low ");
T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap();
Ok(())
}
}
/// Require the transactor pay for themselves and maybe include a tip to gain additional priority
/// in the queue.
#[derive(Encode, Decode, Clone, Eq, PartialEq)]
pub struct TakeFees<T: Trait>(#[codec(compact)] BalanceOf<T>);
impl<T: Trait> TakeFees<T> {
/// utility constructor. Used only in client/factory code.
pub fn from(fee: BalanceOf<T>) -> Self {
Self(fee)
}
/// Compute the final fee value for a particular transaction.
///
/// The final fee is composed of:
/// - _length-fee_: This is the amount paid merely to pay for size of the transaction.
/// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike
/// size-fee, this is not input dependent and reflects the _complexity_ of the execution
/// and the time it consumes.
/// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed
/// transactions can have a tip.
fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> {
let len_fee = if info.pay_length_fee() {
let len = <BalanceOf<T> as From<u32>>::from(len as u32);
let base = T::TransactionBaseFee::get();
let per_byte = T::TransactionByteFee::get();
base.saturating_add(per_byte.saturating_mul(len))
} else {
Zero::zero()
};
let weight_fee = {
// cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded`
// maximum of its data type, which is not desired.
let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get());
let weight_update = <system::Module<T>>::next_weight_multiplier();
let adjusted_weight = weight_update.apply_to(capped_weight);
T::WeightToFee::convert(adjusted_weight)
};
len_fee.saturating_add(weight_fee).saturating_add(tip)
}
}
#[cfg(feature = "std")]
impl<T: Trait> rstd::fmt::Debug for TakeFees<T> {
fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result {
self.0.fmt(f)
}
}
impl<T: Trait> SignedExtension for TakeFees<T> where
BalanceOf<T>: core::marker::Send + core::marker::Sync
{
type AccountId = <T as system::Trait>::AccountId;
type Call = T::Call;
type AdditionalSigned = ();
type Pre = ();
fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) }
fn validate(
&self,
who: &Self::AccountId,
_call: &Self::Call,
info: DispatchInfo,
len: usize,
) -> TransactionValidity {
let fee = Self::compute_fee(len, info, self.0);
// pay fees.
// first use energy, second use balance
let required_energy = T::FeeToEnergy::convert(fee);
let available_energy = T::EnergyCurrency::available_free_balance(who);
let using_energy = required_energy.min(available_energy);
let mut using_fee = BalanceOf::<T>::zero();
if using_energy < required_energy {
using_fee = T::EnergyToFee::convert(required_energy - using_energy);
}
let now = <system::Module<T>>::block_number();
let locking_block = T::EnergyToLocking::convert(using_energy);
// lock energy and get unlocked energy
let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) {
Ok(result) => result,
Err(_) => return InvalidTransaction::Payment.into(),
};
// dispatch EnergyRecovered
if !unlocked_energy.is_zero() {
<Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy));
}
let imbalance = match T::Currency::withdraw(
who,
using_fee.clone(),
WithdrawReason::TransactionPayment,
ExistenceRequirement::KeepAlive,
) {
Ok(imbalance) => imbalance,
Err(_) => return InvalidTransaction::Payment.into(),
};
T::TransactionPayment::on_unbalanced(imbalance);
// increate action point
if !using_energy.is_zero() {
let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone());
if !earned_ap.is_zero() {
T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap();
}
}
// Send event
<Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee));
let mut r = ValidTransaction::default();
// NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which
// will be a bit more than setting the priority to tip. For now, this is enough.
r.priority = fee.saturated_into::<TransactionPriority>();
Ok(r)
}
} | //! # Activity Module
//!
#![cfg_attr(not(feature = "std"), no_std)] | random_line_split | |
mod.rs | //! # Activity Module
//!
#![cfg_attr(not(feature = "std"), no_std)]
use rstd::prelude::*;
use codec::{Encode, Decode};
use support::{
StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure,
traits::{
Currency, ReservableCurrency,
OnFreeBalanceZero, OnUnbalanced,
WithdrawReason, ExistenceRequirement,
Imbalance, Get,
},
dispatch::Result,
};
use sr_primitives::{
transaction_validity::{
TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError,
TransactionValidity,
},
traits::{
Zero, CheckedAdd, CheckedSub,
Saturating, SignedExtension, SaturatedConversion, Convert,
},
weights::{DispatchInfo, SimpleDispatchInfo, Weight},
};
use system::{OnNewAccount, ensure_signed};
use crate::non_transfer_asset::SustainableCurrency;
/// Trait for activity
pub trait ActivityInterface<AccountId, Balance> {
fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result;
}
/// The module's configuration trait.
pub trait Trait: system::Trait {
/// Currency type for this module.
type Currency: ReservableCurrency<Self::AccountId>;
/// Energy type for this module
type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>;
/// Action point type for this module
type ActivityCurrency: Currency<Self::AccountId>;
/// Reputation point type for this module
type ReputationCurrency: Currency<Self::AccountId>;
/// Handler for the unbalanced reduction when taking transaction fees.
type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>;
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
/// The fee to be paid for making a transaction; the base.
type TransactionBaseFee: Get<BalanceOf<Self>>;
/// The fee to be paid for making a transaction; the per-byte portion.
type TransactionByteFee: Get<BalanceOf<Self>>;
/// The base Energy amount of activated account
type EnergyBaseAmount: Get<EnergyOf<Self>>;
/// Convert a weight value into a deductible fee based on the currency type.
type WeightToFee: Convert<Weight, BalanceOf<Self>>;
/// Convert a fee value to energy point
type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert a charging value to energy point
type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert an energy point to fee value
type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>;
/// Convert an energy point to locking block number
type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>;
/// Convert an energy point to action point
type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>;
/// Convert an action point to reputation
type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>;
}
// Balance zone
pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance;
type NegativeImbalanceOf<T> =
<<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance;
// Energy zone
pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Action zone
pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Reputation zone
pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// This module's storage items.
decl_storage! {
trait Store for Module<T: Trait> as Activities {
/// Map from all extend
pub Charged get(charged): map T::AccountId => BalanceOf<T>;
}
}
decl_event!(
pub enum Event<T> where
AccountId = <T as system::Trait>::AccountId,
Balance = BalanceOf<T>,
Energy = EnergyOf<T>,
Reputation = ReputationOf<T>
{
// Fee payment
FeePayed(AccountId, Energy, Balance),
EnergyRecovered(AccountId, Energy),
// Reputation part
ReputationReward(AccountId, Reputation),
ReputationSlash(AccountId, Reputation),
}
);
// The module's dispatchable functions.
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initializing events
fn deposit_event() = default;
/// Bond to increase Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn charge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::charge_for_energy(&who, value)?;
}
/// UnBond to decrease Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn discharge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::discharge_for_energy(&who, value)?;
}
}
}
// The module's main implement
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
pub fn available_energy(who: &T::AccountId) -> EnergyOf<T> {
T::EnergyCurrency::available_free_balance(who)
}
// PRIVATE MUTABLES
fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// ensure reserve
if !T::Currency::can_reserve(who, value) {
return Err("not enough free funds");
}
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?;
let energy_to_charge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?;
// MUTABLES
T::Currency::reserve(who, value)?;
T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?;
<Charged<T>>::insert(who, new_charged);
Ok(())
}
fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?;
let energy_to_discharge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?;
// MUTABLES
T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::Currency::unreserve(who, value);
<Charged<T>>::insert(who, new_charged);
Ok(())
}
}
impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> {
// Implementation of the config type managing the creation of new accounts.
fn on_new_account(who: &T::AccountId) {
T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get());
}
}
impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> {
fn on_free_balance_zero(who: &T::AccountId) {
let dust = <Charged<T>>::take(who);
if !dust.is_zero() {
T::Currency::unreserve(who, dust);
}
T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who));
}
}
impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> {
// do admire
fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result {
let earned_rp = T::ActionPointToReputation::convert(cap.clone());
ensure!(!earned_rp.is_zero(), "action point too low ");
T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap();
Ok(())
}
}
/// Require the transactor pay for themselves and maybe include a tip to gain additional priority
/// in the queue.
#[derive(Encode, Decode, Clone, Eq, PartialEq)]
pub struct | <T: Trait>(#[codec(compact)] BalanceOf<T>);
impl<T: Trait> TakeFees<T> {
/// utility constructor. Used only in client/factory code.
pub fn from(fee: BalanceOf<T>) -> Self {
Self(fee)
}
/// Compute the final fee value for a particular transaction.
///
/// The final fee is composed of:
/// - _length-fee_: This is the amount paid merely to pay for size of the transaction.
/// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike
/// size-fee, this is not input dependent and reflects the _complexity_ of the execution
/// and the time it consumes.
/// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed
/// transactions can have a tip.
fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> {
let len_fee = if info.pay_length_fee() {
let len = <BalanceOf<T> as From<u32>>::from(len as u32);
let base = T::TransactionBaseFee::get();
let per_byte = T::TransactionByteFee::get();
base.saturating_add(per_byte.saturating_mul(len))
} else {
Zero::zero()
};
let weight_fee = {
// cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded`
// maximum of its data type, which is not desired.
let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get());
let weight_update = <system::Module<T>>::next_weight_multiplier();
let adjusted_weight = weight_update.apply_to(capped_weight);
T::WeightToFee::convert(adjusted_weight)
};
len_fee.saturating_add(weight_fee).saturating_add(tip)
}
}
#[cfg(feature = "std")]
impl<T: Trait> rstd::fmt::Debug for TakeFees<T> {
fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result {
self.0.fmt(f)
}
}
impl<T: Trait> SignedExtension for TakeFees<T> where
BalanceOf<T>: core::marker::Send + core::marker::Sync
{
type AccountId = <T as system::Trait>::AccountId;
type Call = T::Call;
type AdditionalSigned = ();
type Pre = ();
fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) }
fn validate(
&self,
who: &Self::AccountId,
_call: &Self::Call,
info: DispatchInfo,
len: usize,
) -> TransactionValidity {
let fee = Self::compute_fee(len, info, self.0);
// pay fees.
// first use energy, second use balance
let required_energy = T::FeeToEnergy::convert(fee);
let available_energy = T::EnergyCurrency::available_free_balance(who);
let using_energy = required_energy.min(available_energy);
let mut using_fee = BalanceOf::<T>::zero();
if using_energy < required_energy {
using_fee = T::EnergyToFee::convert(required_energy - using_energy);
}
let now = <system::Module<T>>::block_number();
let locking_block = T::EnergyToLocking::convert(using_energy);
// lock energy and get unlocked energy
let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) {
Ok(result) => result,
Err(_) => return InvalidTransaction::Payment.into(),
};
// dispatch EnergyRecovered
if !unlocked_energy.is_zero() {
<Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy));
}
let imbalance = match T::Currency::withdraw(
who,
using_fee.clone(),
WithdrawReason::TransactionPayment,
ExistenceRequirement::KeepAlive,
) {
Ok(imbalance) => imbalance,
Err(_) => return InvalidTransaction::Payment.into(),
};
T::TransactionPayment::on_unbalanced(imbalance);
// increate action point
if !using_energy.is_zero() {
let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone());
if !earned_ap.is_zero() {
T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap();
}
}
// Send event
<Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee));
let mut r = ValidTransaction::default();
// NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which
// will be a bit more than setting the priority to tip. For now, this is enough.
r.priority = fee.saturated_into::<TransactionPriority>();
Ok(r)
}
}
| TakeFees | identifier_name |
mod.rs | //! # Activity Module
//!
#![cfg_attr(not(feature = "std"), no_std)]
use rstd::prelude::*;
use codec::{Encode, Decode};
use support::{
StorageValue, StorageMap, decl_event, decl_storage, decl_module, ensure,
traits::{
Currency, ReservableCurrency,
OnFreeBalanceZero, OnUnbalanced,
WithdrawReason, ExistenceRequirement,
Imbalance, Get,
},
dispatch::Result,
};
use sr_primitives::{
transaction_validity::{
TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError,
TransactionValidity,
},
traits::{
Zero, CheckedAdd, CheckedSub,
Saturating, SignedExtension, SaturatedConversion, Convert,
},
weights::{DispatchInfo, SimpleDispatchInfo, Weight},
};
use system::{OnNewAccount, ensure_signed};
use crate::non_transfer_asset::SustainableCurrency;
/// Trait for activity
pub trait ActivityInterface<AccountId, Balance> {
fn admire(sender: &AccountId, target: &AccountId, cap: Balance) -> Result;
}
/// The module's configuration trait.
pub trait Trait: system::Trait {
/// Currency type for this module.
type Currency: ReservableCurrency<Self::AccountId>;
/// Energy type for this module
type EnergyCurrency: SustainableCurrency<Self::AccountId, Moment=Self::BlockNumber>;
/// Action point type for this module
type ActivityCurrency: Currency<Self::AccountId>;
/// Reputation point type for this module
type ReputationCurrency: Currency<Self::AccountId>;
/// Handler for the unbalanced reduction when taking transaction fees.
type TransactionPayment: OnUnbalanced<NegativeImbalanceOf<Self>>;
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
/// The fee to be paid for making a transaction; the base.
type TransactionBaseFee: Get<BalanceOf<Self>>;
/// The fee to be paid for making a transaction; the per-byte portion.
type TransactionByteFee: Get<BalanceOf<Self>>;
/// The base Energy amount of activated account
type EnergyBaseAmount: Get<EnergyOf<Self>>;
/// Convert a weight value into a deductible fee based on the currency type.
type WeightToFee: Convert<Weight, BalanceOf<Self>>;
/// Convert a fee value to energy point
type FeeToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert a charging value to energy point
type ChargingToEnergy: Convert<BalanceOf<Self>, EnergyOf<Self>>;
/// Convert an energy point to fee value
type EnergyToFee: Convert<EnergyOf<Self>, BalanceOf<Self>>;
/// Convert an energy point to locking block number
type EnergyToLocking: Convert<EnergyOf<Self>, <Self as system::Trait>::BlockNumber>;
/// Convert an energy point to action point
type EnergyToActionPoint: Convert<EnergyOf<Self>, ActionPointOf<Self>>;
/// Convert an action point to reputation
type ActionPointToReputation: Convert<ActionPointOf<Self>, ReputationOf<Self>>;
}
// Balance zone
pub type BalanceOf<T> = <<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::Balance;
type NegativeImbalanceOf<T> =
<<T as Trait>::Currency as Currency<<T as system::Trait>::AccountId>>::NegativeImbalance;
// Energy zone
pub type EnergyOf<T> = <<T as Trait>::EnergyCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Action zone
pub type ActionPointOf<T> = <<T as Trait>::ActivityCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// Reputation zone
pub type ReputationOf<T> = <<T as Trait>::ReputationCurrency as Currency<<T as system::Trait>::AccountId>>::Balance;
// This module's storage items.
decl_storage! {
trait Store for Module<T: Trait> as Activities {
/// Map from all extend
pub Charged get(charged): map T::AccountId => BalanceOf<T>;
}
}
decl_event!(
pub enum Event<T> where
AccountId = <T as system::Trait>::AccountId,
Balance = BalanceOf<T>,
Energy = EnergyOf<T>,
Reputation = ReputationOf<T>
{
// Fee payment
FeePayed(AccountId, Energy, Balance),
EnergyRecovered(AccountId, Energy),
// Reputation part
ReputationReward(AccountId, Reputation),
ReputationSlash(AccountId, Reputation),
}
);
// The module's dispatchable functions.
decl_module! {
/// The module declaration.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Initializing events
fn deposit_event() = default;
/// Bond to increase Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn charge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::charge_for_energy(&who, value)?;
}
/// UnBond to decrease Energy
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn discharge(
origin,
#[compact] value: BalanceOf<T>
) {
let who = ensure_signed(origin)?;
Self::discharge_for_energy(&who, value)?;
}
}
}
// The module's main implement
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
pub fn available_energy(who: &T::AccountId) -> EnergyOf<T> |
// PRIVATE MUTABLES
fn charge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// ensure reserve
if !T::Currency::can_reserve(who, value) {
return Err("not enough free funds");
}
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_add(&value).ok_or("account has charged overflow")?;
let energy_to_charge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_add(&energy_to_charge).ok_or("Overflow energy amount")?;
// MUTABLES
T::Currency::reserve(who, value)?;
T::EnergyCurrency::deposit_into_existing(who, energy_to_charge)?;
<Charged<T>>::insert(who, new_charged);
Ok(())
}
fn discharge_for_energy(who: &T::AccountId, value: BalanceOf<T>) -> Result {
// check current_charged
let current_charged = <Charged<T>>::get(who);
let new_charged = current_charged.checked_sub(&value).ok_or("account has too few charged funds")?;
let energy_to_discharge = T::ChargingToEnergy::convert(value);
let current_energy = T::EnergyCurrency::free_balance(who);
current_energy.checked_sub(&energy_to_discharge).ok_or("account has too few energy")?;
// MUTABLES
T::EnergyCurrency::withdraw(who, energy_to_discharge, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::Currency::unreserve(who, value);
<Charged<T>>::insert(who, new_charged);
Ok(())
}
}
impl<T: Trait> OnNewAccount<T::AccountId> for Module<T> {
// Implementation of the config type managing the creation of new accounts.
fn on_new_account(who: &T::AccountId) {
T::EnergyCurrency::deposit_creating(who, T::EnergyBaseAmount::get());
}
}
impl<T: Trait> OnFreeBalanceZero<T::AccountId> for Module<T> {
fn on_free_balance_zero(who: &T::AccountId) {
let dust = <Charged<T>>::take(who);
if !dust.is_zero() {
T::Currency::unreserve(who, dust);
}
T::EnergyCurrency::slash(who, T::EnergyCurrency::total_balance(who));
}
}
impl<T: Trait> ActivityInterface<T::AccountId, ActionPointOf<T>> for Module<T> {
// do admire
fn admire(sender: &T::AccountId, target: &T::AccountId, cap: ActionPointOf<T>) -> Result {
let earned_rp = T::ActionPointToReputation::convert(cap.clone());
ensure!(!earned_rp.is_zero(), "action point too low ");
T::ActivityCurrency::withdraw(sender, cap, WithdrawReason::Fee, ExistenceRequirement::KeepAlive)?;
T::ReputationCurrency::deposit_into_existing(target, earned_rp).unwrap();
Ok(())
}
}
/// Require the transactor pay for themselves and maybe include a tip to gain additional priority
/// in the queue.
#[derive(Encode, Decode, Clone, Eq, PartialEq)]
pub struct TakeFees<T: Trait>(#[codec(compact)] BalanceOf<T>);
impl<T: Trait> TakeFees<T> {
/// utility constructor. Used only in client/factory code.
pub fn from(fee: BalanceOf<T>) -> Self {
Self(fee)
}
/// Compute the final fee value for a particular transaction.
///
/// The final fee is composed of:
/// - _length-fee_: This is the amount paid merely to pay for size of the transaction.
/// - _weight-fee_: This amount is computed based on the weight of the transaction. Unlike
/// size-fee, this is not input dependent and reflects the _complexity_ of the execution
/// and the time it consumes.
/// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed
/// transactions can have a tip.
fn compute_fee(len: usize, info: DispatchInfo, tip: BalanceOf<T>) -> BalanceOf<T> {
let len_fee = if info.pay_length_fee() {
let len = <BalanceOf<T> as From<u32>>::from(len as u32);
let base = T::TransactionBaseFee::get();
let per_byte = T::TransactionByteFee::get();
base.saturating_add(per_byte.saturating_mul(len))
} else {
Zero::zero()
};
let weight_fee = {
// cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded`
// maximum of its data type, which is not desired.
let capped_weight = info.weight.min(<T as system::Trait>::MaximumBlockWeight::get());
let weight_update = <system::Module<T>>::next_weight_multiplier();
let adjusted_weight = weight_update.apply_to(capped_weight);
T::WeightToFee::convert(adjusted_weight)
};
len_fee.saturating_add(weight_fee).saturating_add(tip)
}
}
#[cfg(feature = "std")]
impl<T: Trait> rstd::fmt::Debug for TakeFees<T> {
fn fmt(&self, f: &mut rstd::fmt::Formatter) -> rstd::fmt::Result {
self.0.fmt(f)
}
}
impl<T: Trait> SignedExtension for TakeFees<T> where
BalanceOf<T>: core::marker::Send + core::marker::Sync
{
type AccountId = <T as system::Trait>::AccountId;
type Call = T::Call;
type AdditionalSigned = ();
type Pre = ();
fn additional_signed(&self) -> rstd::result::Result<(), TransactionValidityError> { Ok(()) }
fn validate(
&self,
who: &Self::AccountId,
_call: &Self::Call,
info: DispatchInfo,
len: usize,
) -> TransactionValidity {
let fee = Self::compute_fee(len, info, self.0);
// pay fees.
// first use energy, second use balance
let required_energy = T::FeeToEnergy::convert(fee);
let available_energy = T::EnergyCurrency::available_free_balance(who);
let using_energy = required_energy.min(available_energy);
let mut using_fee = BalanceOf::<T>::zero();
if using_energy < required_energy {
using_fee = T::EnergyToFee::convert(required_energy - using_energy);
}
let now = <system::Module<T>>::block_number();
let locking_block = T::EnergyToLocking::convert(using_energy);
// lock energy and get unlocked energy
let unlocked_energy = match T::EnergyCurrency::use_and_lock_free_balance(who, using_energy.clone(), now + locking_block) {
Ok(result) => result,
Err(_) => return InvalidTransaction::Payment.into(),
};
// dispatch EnergyRecovered
if !unlocked_energy.is_zero() {
<Module<T>>::deposit_event(RawEvent::EnergyRecovered(who.clone(), unlocked_energy));
}
let imbalance = match T::Currency::withdraw(
who,
using_fee.clone(),
WithdrawReason::TransactionPayment,
ExistenceRequirement::KeepAlive,
) {
Ok(imbalance) => imbalance,
Err(_) => return InvalidTransaction::Payment.into(),
};
T::TransactionPayment::on_unbalanced(imbalance);
// increate action point
if !using_energy.is_zero() {
let earned_ap = T::EnergyToActionPoint::convert(using_energy.clone());
if !earned_ap.is_zero() {
T::ActivityCurrency::deposit_into_existing(who, earned_ap).unwrap();
}
}
// Send event
<Module<T>>::deposit_event(RawEvent::FeePayed(who.clone(), using_energy, using_fee));
let mut r = ValidTransaction::default();
// NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which
// will be a bit more than setting the priority to tip. For now, this is enough.
r.priority = fee.saturated_into::<TransactionPriority>();
Ok(r)
}
}
| {
T::EnergyCurrency::available_free_balance(who)
} | identifier_body |
compile.ts | import fs from "fs";
import { SandboxStatus } from "simple-sandbox";
import objectHash from "object-hash";
import LruCache from "lru-cache";
import winston from "winston";
import { v4 as uuid } from "uuid";
import getLanguage, { LanguageConfig } from "./languages";
import { MappedPath, safelyJoinPath, ensureDirectoryEmpty } from "./utils";
import { readFileOmitted, OmittableString, prependOmittableString } from "./omittableString";
import {
SandboxConfigWithoutMountInfo,
runSandbox,
SANDBOX_INSIDE_PATH_SOURCE,
SANDBOX_INSIDE_PATH_BINARY
} from "./sandbox";
import config, { serverSideConfig } from "./config";
import { runTaskQueued } from "./taskQueue";
import { getFile, getFileHash } from "./file";
import * as fsNative from "./fsNative";
export interface CompilationConfig extends SandboxConfigWithoutMountInfo {
messageFile?: string; // The file contains the message to display for user (in the binary directory)
extraInfoFile?: string; // The file contains the extra information for running the compiled program (in the binary directory)
workingDirectory: string; // The working directory for the compiler or script
}
export interface CompileTask {
language: string;
code: string;
compileAndRunOptions: unknown;
extraSourceFiles?: Record<string, string>;
}
async function hashCompileTask(compileTask: CompileTask): Promise<string> {
return objectHash({
language: compileTask.language,
code: compileTask.code,
compileAndRunOptions: compileTask.compileAndRunOptions,
extraSourceFiles:
compileTask.extraSourceFiles &&
(await Promise.all(
Object.entries(compileTask.extraSourceFiles).map(async ([filename, fileUuid]) => [
filename,
await getFileHash(fileUuid)
])
))
});
}
export interface CompileResult {
compileTaskHash: string;
success: boolean;
message: OmittableString;
}
// These class implements reference count to prevent a compile result being deleted
// from the disk during using
export class CompileResultSuccess implements CompileResult {
public readonly success: true = true;
constructor(
public readonly compileTaskHash: string,
public readonly message: OmittableString,
public readonly binaryDirectory: string,
public readonly binaryDirectorySize: number,
public readonly extraInfo: string
) {}
// The referenceCount is initially zero, the result must be referenced at least once
// Then when dereferenced to zero it will be deleted from the disk
private referenceCount: number = 0;
public reference() |
public async dereference() {
if (--this.referenceCount === 0) {
await fsNative.remove(this.binaryDirectory);
}
}
async copyTo(newBinaryDirectory: string) {
this.reference();
await fsNative.copy(this.binaryDirectory, newBinaryDirectory);
await this.dereference();
return new CompileResultSuccess(
this.compileTaskHash,
this.message,
newBinaryDirectory,
this.binaryDirectorySize,
this.extraInfo
);
}
}
// Why NOT using the task hash as the directory name? Because there'll be a race condition
// If a compile result is disposed from the cache, but still have at least one reference
// e.g. referenced by a judge task which have not finished copying the binary files to its working directory
// Another cache set operation with the same task will overwrite the files (and may cause the judge task using a corrupted file)
// Use a random uuid as the key instead to prevent this
class CompileResultCache {
private readonly lruCache = new LruCache<string, CompileResultSuccess>({
max: config.binaryCacheMaxSize,
length: result => result.binaryDirectorySize,
dispose: (compileTaskHash, result) => {
winston.verbose(`dispose() from compile result cache: ${compileTaskHash}`);
setImmediate(() => {
// It's safe NOT to await it..
result.dereference().catch(e => winston.error(`Failed to remove compile result on evicting cache: ${e.stack}`));
});
}
});
// The set()/get()'s returned result is reference()-ed
// and must be dereference()-ed
public get(compileTaskHash: string): CompileResultSuccess {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
return null;
}
// set() should not be called twice with the same compileTaskHash in the same time
// i.e. call another time with the same compileTaskHash before the previous finished
public async set(compileTaskHash: string, result: CompileResultSuccess): Promise<CompileResultSuccess> {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
const newCompileResult = await result.copyTo(safelyJoinPath(config.binaryCacheStore, uuid()));
newCompileResult.reference();
this.lruCache.set(compileTaskHash, newCompileResult);
return newCompileResult.reference();
}
}
interface PendingCompileTask {
resultConsumers: ((compileResult: CompileResult) => void)[];
promise: Promise<void>;
}
// If there're multiple calls to compile() with the same compileTask, it's to prevent the task to be compiled multiple times
// compileTaskHash -> Promise of task
const pendingCompileTasks: Map<string, PendingCompileTask> = new Map();
const compileResultCache = new CompileResultCache();
export async function compile(compileTask: CompileTask): Promise<CompileResult> {
const languageConfig = getLanguage(compileTask.language);
const compileTaskHash = await hashCompileTask(compileTask);
const cachedResult = compileResultCache.get(compileTaskHash);
if (cachedResult) {
winston.verbose(`Use cached compile reslt for ${compileTaskHash}`);
return cachedResult;
}
let pendingCompileTask = pendingCompileTasks.get(compileTaskHash);
if (!pendingCompileTask) {
// Use a array of functions to ensure every calls to compile() of this task could get
// a valid CompileResultSuccess object (with positive referenceCount)
// I don't think "await promise" is guaranteed to return in a synchronous flow after the promise resolved
const resultConsumers = [];
pendingCompileTasks.set(
compileTaskHash,
(pendingCompileTask = {
resultConsumers,
promise: runTaskQueued(async taskWorkingDirectory => {
// The compileResult is already reference()-ed
const compileResult = await doCompile(compileTask, compileTaskHash, languageConfig, taskWorkingDirectory);
winston.verbose(`Compile result: ${JSON.stringify(compileResult)}`);
for (const resultConsumer of resultConsumers)
resultConsumer(compileResult instanceof CompileResultSuccess ? compileResult.reference() : compileResult);
if (compileResult instanceof CompileResultSuccess) await compileResult.dereference();
}).finally(() => pendingCompileTasks.delete(compileTaskHash))
})
);
}
let result: CompileResult;
pendingCompileTask.resultConsumers.push(r => {
result = r;
});
await pendingCompileTask.promise;
return result;
}
// Return reference()-ed result if success
async function doCompile(
compileTask: CompileTask,
compileTaskHash: string,
languageConfig: LanguageConfig<unknown>,
taskWorkingDirectory: string
): Promise<CompileResult> {
const { sourceFilename, binarySizeLimit } = languageConfig.getMetaOptions(compileTask.compileAndRunOptions);
const sourceDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "source"),
inside: SANDBOX_INSIDE_PATH_SOURCE
};
const binaryDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "working"),
inside: SANDBOX_INSIDE_PATH_BINARY
};
const tempDirectoryOutside = safelyJoinPath(taskWorkingDirectory, "temp");
await Promise.all([
ensureDirectoryEmpty(sourceDirectory.outside),
ensureDirectoryEmpty(binaryDirectory.outside),
ensureDirectoryEmpty(tempDirectoryOutside)
]);
await Promise.all(
Object.entries(compileTask.extraSourceFiles || {}).map(([dst, src]) =>
fs.promises.copyFile(getFile(src), safelyJoinPath(sourceDirectory.outside, dst))
)
);
const sourceFile = safelyJoinPath(sourceDirectory, sourceFilename);
await fs.promises.writeFile(sourceFile.outside, compileTask.code);
const compileConfig = languageConfig.compile({
sourceDirectoryInside: sourceDirectory.inside,
sourcePathInside: sourceFile.inside,
binaryDirectoryInside: binaryDirectory.inside,
compileAndRunOptions: compileTask.compileAndRunOptions
});
// The `taskId` parameter of `runSandbox` is just used to cancel the sandbox
// But compilation couldn't be cancelled since multiple submissions may share the same compilation
const sandboxResult = await runSandbox(null, {
...compileConfig,
tempDirectoryOutside,
extraMounts: [
{
mappedPath: sourceDirectory,
readOnly: true
},
{
mappedPath: binaryDirectory,
readOnly: false
}
]
});
const messageFile = safelyJoinPath(binaryDirectory, compileConfig.messageFile);
const extraInfoFile = compileConfig.extraInfoFile && safelyJoinPath(binaryDirectory, compileConfig.extraInfoFile);
const [message, extraInfo] = await Promise.all([
readFileOmitted(messageFile.outside, serverSideConfig.limit.compilerMessage).then(result => result || ""),
extraInfoFile
? fsNative
.exists(extraInfoFile.outside)
.then(exists => (exists ? fs.promises.readFile(extraInfoFile.outside, "utf-8") : null))
: null
]);
await Promise.all([
fsNative.remove(messageFile.outside),
extraInfoFile ? fsNative.remove(extraInfoFile.outside) : null
]);
if (sandboxResult.status === SandboxStatus.OK) {
if (sandboxResult.code === 0) {
const binaryDirectorySize = await fsNative.calcSize(binaryDirectory.outside);
if (binaryDirectorySize > binarySizeLimit) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the size limit.\n\n`,
message,
true
)
};
} else if (binaryDirectorySize > config.binaryCacheMaxSize) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the limit of cache storage.\n\n`,
message,
true
)
};
} else {
// We must done copying it to the cache before returning
// Since the initial compile result's directory is NOT preserved after returning to the task queue
return await compileResultCache.set(
compileTaskHash,
new CompileResultSuccess(compileTaskHash, message, binaryDirectory.outside, binaryDirectorySize, extraInfo)
);
}
} else {
return {
compileTaskHash,
success: false,
message
};
}
} else {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`A ${SandboxStatus[sandboxResult.status]} encountered while compiling the code.\n\n`,
message,
true
)
};
}
}
| {
this.referenceCount++;
return this;
} | identifier_body |
compile.ts | import fs from "fs";
import { SandboxStatus } from "simple-sandbox";
import objectHash from "object-hash";
import LruCache from "lru-cache";
import winston from "winston";
import { v4 as uuid } from "uuid";
import getLanguage, { LanguageConfig } from "./languages";
import { MappedPath, safelyJoinPath, ensureDirectoryEmpty } from "./utils";
import { readFileOmitted, OmittableString, prependOmittableString } from "./omittableString";
import {
SandboxConfigWithoutMountInfo,
runSandbox,
SANDBOX_INSIDE_PATH_SOURCE,
SANDBOX_INSIDE_PATH_BINARY
} from "./sandbox";
import config, { serverSideConfig } from "./config";
import { runTaskQueued } from "./taskQueue";
import { getFile, getFileHash } from "./file";
import * as fsNative from "./fsNative";
export interface CompilationConfig extends SandboxConfigWithoutMountInfo {
messageFile?: string; // The file contains the message to display for user (in the binary directory)
extraInfoFile?: string; // The file contains the extra information for running the compiled program (in the binary directory)
workingDirectory: string; // The working directory for the compiler or script
}
export interface CompileTask {
language: string;
code: string;
compileAndRunOptions: unknown;
extraSourceFiles?: Record<string, string>;
}
async function hashCompileTask(compileTask: CompileTask): Promise<string> {
return objectHash({
language: compileTask.language,
code: compileTask.code,
compileAndRunOptions: compileTask.compileAndRunOptions,
extraSourceFiles:
compileTask.extraSourceFiles &&
(await Promise.all(
Object.entries(compileTask.extraSourceFiles).map(async ([filename, fileUuid]) => [
filename,
await getFileHash(fileUuid)
])
))
});
}
export interface CompileResult {
compileTaskHash: string;
success: boolean;
message: OmittableString;
}
// These class implements reference count to prevent a compile result being deleted
// from the disk during using
export class CompileResultSuccess implements CompileResult {
public readonly success: true = true;
constructor(
public readonly compileTaskHash: string,
public readonly message: OmittableString,
public readonly binaryDirectory: string,
public readonly binaryDirectorySize: number,
public readonly extraInfo: string
) {}
// The referenceCount is initially zero, the result must be referenced at least once
// Then when dereferenced to zero it will be deleted from the disk
private referenceCount: number = 0;
public reference() {
this.referenceCount++;
return this;
}
public async | () {
if (--this.referenceCount === 0) {
await fsNative.remove(this.binaryDirectory);
}
}
async copyTo(newBinaryDirectory: string) {
this.reference();
await fsNative.copy(this.binaryDirectory, newBinaryDirectory);
await this.dereference();
return new CompileResultSuccess(
this.compileTaskHash,
this.message,
newBinaryDirectory,
this.binaryDirectorySize,
this.extraInfo
);
}
}
// Why NOT using the task hash as the directory name? Because there'll be a race condition
// If a compile result is disposed from the cache, but still have at least one reference
// e.g. referenced by a judge task which have not finished copying the binary files to its working directory
// Another cache set operation with the same task will overwrite the files (and may cause the judge task using a corrupted file)
// Use a random uuid as the key instead to prevent this
class CompileResultCache {
private readonly lruCache = new LruCache<string, CompileResultSuccess>({
max: config.binaryCacheMaxSize,
length: result => result.binaryDirectorySize,
dispose: (compileTaskHash, result) => {
winston.verbose(`dispose() from compile result cache: ${compileTaskHash}`);
setImmediate(() => {
// It's safe NOT to await it..
result.dereference().catch(e => winston.error(`Failed to remove compile result on evicting cache: ${e.stack}`));
});
}
});
// The set()/get()'s returned result is reference()-ed
// and must be dereference()-ed
public get(compileTaskHash: string): CompileResultSuccess {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
return null;
}
// set() should not be called twice with the same compileTaskHash in the same time
// i.e. call another time with the same compileTaskHash before the previous finished
public async set(compileTaskHash: string, result: CompileResultSuccess): Promise<CompileResultSuccess> {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
const newCompileResult = await result.copyTo(safelyJoinPath(config.binaryCacheStore, uuid()));
newCompileResult.reference();
this.lruCache.set(compileTaskHash, newCompileResult);
return newCompileResult.reference();
}
}
interface PendingCompileTask {
resultConsumers: ((compileResult: CompileResult) => void)[];
promise: Promise<void>;
}
// If there're multiple calls to compile() with the same compileTask, it's to prevent the task to be compiled multiple times
// compileTaskHash -> Promise of task
const pendingCompileTasks: Map<string, PendingCompileTask> = new Map();
const compileResultCache = new CompileResultCache();
export async function compile(compileTask: CompileTask): Promise<CompileResult> {
const languageConfig = getLanguage(compileTask.language);
const compileTaskHash = await hashCompileTask(compileTask);
const cachedResult = compileResultCache.get(compileTaskHash);
if (cachedResult) {
winston.verbose(`Use cached compile reslt for ${compileTaskHash}`);
return cachedResult;
}
let pendingCompileTask = pendingCompileTasks.get(compileTaskHash);
if (!pendingCompileTask) {
// Use a array of functions to ensure every calls to compile() of this task could get
// a valid CompileResultSuccess object (with positive referenceCount)
// I don't think "await promise" is guaranteed to return in a synchronous flow after the promise resolved
const resultConsumers = [];
pendingCompileTasks.set(
compileTaskHash,
(pendingCompileTask = {
resultConsumers,
promise: runTaskQueued(async taskWorkingDirectory => {
// The compileResult is already reference()-ed
const compileResult = await doCompile(compileTask, compileTaskHash, languageConfig, taskWorkingDirectory);
winston.verbose(`Compile result: ${JSON.stringify(compileResult)}`);
for (const resultConsumer of resultConsumers)
resultConsumer(compileResult instanceof CompileResultSuccess ? compileResult.reference() : compileResult);
if (compileResult instanceof CompileResultSuccess) await compileResult.dereference();
}).finally(() => pendingCompileTasks.delete(compileTaskHash))
})
);
}
let result: CompileResult;
pendingCompileTask.resultConsumers.push(r => {
result = r;
});
await pendingCompileTask.promise;
return result;
}
// Return reference()-ed result if success
async function doCompile(
compileTask: CompileTask,
compileTaskHash: string,
languageConfig: LanguageConfig<unknown>,
taskWorkingDirectory: string
): Promise<CompileResult> {
const { sourceFilename, binarySizeLimit } = languageConfig.getMetaOptions(compileTask.compileAndRunOptions);
const sourceDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "source"),
inside: SANDBOX_INSIDE_PATH_SOURCE
};
const binaryDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "working"),
inside: SANDBOX_INSIDE_PATH_BINARY
};
const tempDirectoryOutside = safelyJoinPath(taskWorkingDirectory, "temp");
await Promise.all([
ensureDirectoryEmpty(sourceDirectory.outside),
ensureDirectoryEmpty(binaryDirectory.outside),
ensureDirectoryEmpty(tempDirectoryOutside)
]);
await Promise.all(
Object.entries(compileTask.extraSourceFiles || {}).map(([dst, src]) =>
fs.promises.copyFile(getFile(src), safelyJoinPath(sourceDirectory.outside, dst))
)
);
const sourceFile = safelyJoinPath(sourceDirectory, sourceFilename);
await fs.promises.writeFile(sourceFile.outside, compileTask.code);
const compileConfig = languageConfig.compile({
sourceDirectoryInside: sourceDirectory.inside,
sourcePathInside: sourceFile.inside,
binaryDirectoryInside: binaryDirectory.inside,
compileAndRunOptions: compileTask.compileAndRunOptions
});
// The `taskId` parameter of `runSandbox` is just used to cancel the sandbox
// But compilation couldn't be cancelled since multiple submissions may share the same compilation
const sandboxResult = await runSandbox(null, {
...compileConfig,
tempDirectoryOutside,
extraMounts: [
{
mappedPath: sourceDirectory,
readOnly: true
},
{
mappedPath: binaryDirectory,
readOnly: false
}
]
});
const messageFile = safelyJoinPath(binaryDirectory, compileConfig.messageFile);
const extraInfoFile = compileConfig.extraInfoFile && safelyJoinPath(binaryDirectory, compileConfig.extraInfoFile);
const [message, extraInfo] = await Promise.all([
readFileOmitted(messageFile.outside, serverSideConfig.limit.compilerMessage).then(result => result || ""),
extraInfoFile
? fsNative
.exists(extraInfoFile.outside)
.then(exists => (exists ? fs.promises.readFile(extraInfoFile.outside, "utf-8") : null))
: null
]);
await Promise.all([
fsNative.remove(messageFile.outside),
extraInfoFile ? fsNative.remove(extraInfoFile.outside) : null
]);
if (sandboxResult.status === SandboxStatus.OK) {
if (sandboxResult.code === 0) {
const binaryDirectorySize = await fsNative.calcSize(binaryDirectory.outside);
if (binaryDirectorySize > binarySizeLimit) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the size limit.\n\n`,
message,
true
)
};
} else if (binaryDirectorySize > config.binaryCacheMaxSize) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the limit of cache storage.\n\n`,
message,
true
)
};
} else {
// We must done copying it to the cache before returning
// Since the initial compile result's directory is NOT preserved after returning to the task queue
return await compileResultCache.set(
compileTaskHash,
new CompileResultSuccess(compileTaskHash, message, binaryDirectory.outside, binaryDirectorySize, extraInfo)
);
}
} else {
return {
compileTaskHash,
success: false,
message
};
}
} else {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`A ${SandboxStatus[sandboxResult.status]} encountered while compiling the code.\n\n`,
message,
true
)
};
}
}
| dereference | identifier_name |
compile.ts | import fs from "fs";
import { SandboxStatus } from "simple-sandbox";
import objectHash from "object-hash";
import LruCache from "lru-cache";
import winston from "winston";
import { v4 as uuid } from "uuid";
import getLanguage, { LanguageConfig } from "./languages";
import { MappedPath, safelyJoinPath, ensureDirectoryEmpty } from "./utils";
import { readFileOmitted, OmittableString, prependOmittableString } from "./omittableString";
import {
SandboxConfigWithoutMountInfo,
runSandbox,
SANDBOX_INSIDE_PATH_SOURCE,
SANDBOX_INSIDE_PATH_BINARY
} from "./sandbox";
import config, { serverSideConfig } from "./config";
import { runTaskQueued } from "./taskQueue";
import { getFile, getFileHash } from "./file";
import * as fsNative from "./fsNative";
export interface CompilationConfig extends SandboxConfigWithoutMountInfo {
messageFile?: string; // The file contains the message to display for user (in the binary directory)
extraInfoFile?: string; // The file contains the extra information for running the compiled program (in the binary directory)
workingDirectory: string; // The working directory for the compiler or script
}
export interface CompileTask {
language: string;
code: string;
compileAndRunOptions: unknown;
extraSourceFiles?: Record<string, string>;
}
async function hashCompileTask(compileTask: CompileTask): Promise<string> {
return objectHash({
language: compileTask.language,
code: compileTask.code,
compileAndRunOptions: compileTask.compileAndRunOptions,
extraSourceFiles:
compileTask.extraSourceFiles &&
(await Promise.all(
Object.entries(compileTask.extraSourceFiles).map(async ([filename, fileUuid]) => [
filename,
await getFileHash(fileUuid)
])
))
});
}
export interface CompileResult {
compileTaskHash: string;
success: boolean;
message: OmittableString;
}
// These class implements reference count to prevent a compile result being deleted
// from the disk during using
export class CompileResultSuccess implements CompileResult {
public readonly success: true = true;
constructor(
public readonly compileTaskHash: string,
public readonly message: OmittableString,
public readonly binaryDirectory: string,
public readonly binaryDirectorySize: number,
public readonly extraInfo: string
) {}
// The referenceCount is initially zero, the result must be referenced at least once
// Then when dereferenced to zero it will be deleted from the disk
private referenceCount: number = 0;
public reference() {
this.referenceCount++;
return this;
}
public async dereference() {
if (--this.referenceCount === 0) {
await fsNative.remove(this.binaryDirectory);
}
} | await fsNative.copy(this.binaryDirectory, newBinaryDirectory);
await this.dereference();
return new CompileResultSuccess(
this.compileTaskHash,
this.message,
newBinaryDirectory,
this.binaryDirectorySize,
this.extraInfo
);
}
}
// Why NOT using the task hash as the directory name? Because there'll be a race condition
// If a compile result is disposed from the cache, but still have at least one reference
// e.g. referenced by a judge task which have not finished copying the binary files to its working directory
// Another cache set operation with the same task will overwrite the files (and may cause the judge task using a corrupted file)
// Use a random uuid as the key instead to prevent this
class CompileResultCache {
private readonly lruCache = new LruCache<string, CompileResultSuccess>({
max: config.binaryCacheMaxSize,
length: result => result.binaryDirectorySize,
dispose: (compileTaskHash, result) => {
winston.verbose(`dispose() from compile result cache: ${compileTaskHash}`);
setImmediate(() => {
// It's safe NOT to await it..
result.dereference().catch(e => winston.error(`Failed to remove compile result on evicting cache: ${e.stack}`));
});
}
});
// The set()/get()'s returned result is reference()-ed
// and must be dereference()-ed
public get(compileTaskHash: string): CompileResultSuccess {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
return null;
}
// set() should not be called twice with the same compileTaskHash in the same time
// i.e. call another time with the same compileTaskHash before the previous finished
public async set(compileTaskHash: string, result: CompileResultSuccess): Promise<CompileResultSuccess> {
if (this.lruCache.has(compileTaskHash)) return this.lruCache.get(compileTaskHash).reference();
const newCompileResult = await result.copyTo(safelyJoinPath(config.binaryCacheStore, uuid()));
newCompileResult.reference();
this.lruCache.set(compileTaskHash, newCompileResult);
return newCompileResult.reference();
}
}
interface PendingCompileTask {
resultConsumers: ((compileResult: CompileResult) => void)[];
promise: Promise<void>;
}
// If there're multiple calls to compile() with the same compileTask, it's to prevent the task to be compiled multiple times
// compileTaskHash -> Promise of task
const pendingCompileTasks: Map<string, PendingCompileTask> = new Map();
const compileResultCache = new CompileResultCache();
export async function compile(compileTask: CompileTask): Promise<CompileResult> {
const languageConfig = getLanguage(compileTask.language);
const compileTaskHash = await hashCompileTask(compileTask);
const cachedResult = compileResultCache.get(compileTaskHash);
if (cachedResult) {
winston.verbose(`Use cached compile reslt for ${compileTaskHash}`);
return cachedResult;
}
let pendingCompileTask = pendingCompileTasks.get(compileTaskHash);
if (!pendingCompileTask) {
// Use a array of functions to ensure every calls to compile() of this task could get
// a valid CompileResultSuccess object (with positive referenceCount)
// I don't think "await promise" is guaranteed to return in a synchronous flow after the promise resolved
const resultConsumers = [];
pendingCompileTasks.set(
compileTaskHash,
(pendingCompileTask = {
resultConsumers,
promise: runTaskQueued(async taskWorkingDirectory => {
// The compileResult is already reference()-ed
const compileResult = await doCompile(compileTask, compileTaskHash, languageConfig, taskWorkingDirectory);
winston.verbose(`Compile result: ${JSON.stringify(compileResult)}`);
for (const resultConsumer of resultConsumers)
resultConsumer(compileResult instanceof CompileResultSuccess ? compileResult.reference() : compileResult);
if (compileResult instanceof CompileResultSuccess) await compileResult.dereference();
}).finally(() => pendingCompileTasks.delete(compileTaskHash))
})
);
}
let result: CompileResult;
pendingCompileTask.resultConsumers.push(r => {
result = r;
});
await pendingCompileTask.promise;
return result;
}
// Return reference()-ed result if success
async function doCompile(
compileTask: CompileTask,
compileTaskHash: string,
languageConfig: LanguageConfig<unknown>,
taskWorkingDirectory: string
): Promise<CompileResult> {
const { sourceFilename, binarySizeLimit } = languageConfig.getMetaOptions(compileTask.compileAndRunOptions);
const sourceDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "source"),
inside: SANDBOX_INSIDE_PATH_SOURCE
};
const binaryDirectory: MappedPath = {
outside: safelyJoinPath(taskWorkingDirectory, "working"),
inside: SANDBOX_INSIDE_PATH_BINARY
};
const tempDirectoryOutside = safelyJoinPath(taskWorkingDirectory, "temp");
await Promise.all([
ensureDirectoryEmpty(sourceDirectory.outside),
ensureDirectoryEmpty(binaryDirectory.outside),
ensureDirectoryEmpty(tempDirectoryOutside)
]);
await Promise.all(
Object.entries(compileTask.extraSourceFiles || {}).map(([dst, src]) =>
fs.promises.copyFile(getFile(src), safelyJoinPath(sourceDirectory.outside, dst))
)
);
const sourceFile = safelyJoinPath(sourceDirectory, sourceFilename);
await fs.promises.writeFile(sourceFile.outside, compileTask.code);
const compileConfig = languageConfig.compile({
sourceDirectoryInside: sourceDirectory.inside,
sourcePathInside: sourceFile.inside,
binaryDirectoryInside: binaryDirectory.inside,
compileAndRunOptions: compileTask.compileAndRunOptions
});
// The `taskId` parameter of `runSandbox` is just used to cancel the sandbox
// But compilation couldn't be cancelled since multiple submissions may share the same compilation
const sandboxResult = await runSandbox(null, {
...compileConfig,
tempDirectoryOutside,
extraMounts: [
{
mappedPath: sourceDirectory,
readOnly: true
},
{
mappedPath: binaryDirectory,
readOnly: false
}
]
});
const messageFile = safelyJoinPath(binaryDirectory, compileConfig.messageFile);
const extraInfoFile = compileConfig.extraInfoFile && safelyJoinPath(binaryDirectory, compileConfig.extraInfoFile);
const [message, extraInfo] = await Promise.all([
readFileOmitted(messageFile.outside, serverSideConfig.limit.compilerMessage).then(result => result || ""),
extraInfoFile
? fsNative
.exists(extraInfoFile.outside)
.then(exists => (exists ? fs.promises.readFile(extraInfoFile.outside, "utf-8") : null))
: null
]);
await Promise.all([
fsNative.remove(messageFile.outside),
extraInfoFile ? fsNative.remove(extraInfoFile.outside) : null
]);
if (sandboxResult.status === SandboxStatus.OK) {
if (sandboxResult.code === 0) {
const binaryDirectorySize = await fsNative.calcSize(binaryDirectory.outside);
if (binaryDirectorySize > binarySizeLimit) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the size limit.\n\n`,
message,
true
)
};
} else if (binaryDirectorySize > config.binaryCacheMaxSize) {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`The source code compiled to ${binaryDirectorySize} bytes, exceeding the limit of cache storage.\n\n`,
message,
true
)
};
} else {
// We must done copying it to the cache before returning
// Since the initial compile result's directory is NOT preserved after returning to the task queue
return await compileResultCache.set(
compileTaskHash,
new CompileResultSuccess(compileTaskHash, message, binaryDirectory.outside, binaryDirectorySize, extraInfo)
);
}
} else {
return {
compileTaskHash,
success: false,
message
};
}
} else {
return {
compileTaskHash,
success: false,
message: prependOmittableString(
`A ${SandboxStatus[sandboxResult.status]} encountered while compiling the code.\n\n`,
message,
true
)
};
}
} |
async copyTo(newBinaryDirectory: string) {
this.reference(); | random_line_split |
supervised_ml_(classification)_assignment_(final).py | # -*- coding: utf-8 -*-
"""Supervised_ML_(Classification)_assignment_(final)
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dt_czoLEqYxIoCA-v7Ynu0XHCWHnFWsB
"""
#import of libraries
import pandas as pd
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report, roc_curve, precision_score, recall_score
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings('ignore')
"""---
# Supervised Machine Learning (Classification) on Heart Attack Data
The follow assignment will explore Heart Attack Data and explore 3 separate models to assess the accuracy of predicting heart attack. The dataset used in this task can be found at; https://www.kaggle.com/rashikrahmanpritom/heart-attack-analysis-prediction-dataset
Features:
* Age : Age of the patient
* Sex : Sex of the patient
* exang: exercise induced angina (1 = yes; 0 = no)
* ca: number of major vessels (0-3)
* cp : Chest Pain type chest pain type :
* cp Value 1: typical angina
* cp Value 2: atypical angina
* cp Value 3: non-anginal pain
* cp Value 4: asymptomatic
* trtbps : resting blood pressure (in mm Hg)
* chol : cholesterol in mg/dl fetched via BMI sensor
* fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
* rest_ecg : resting electrocardiographic results :
* Value 0: normal
* Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
* Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
* thalach : maximum heart rate achieved
* target : 0= less chance of heart attack 1= more chance of heart attack
Let's begin by importing our data into pandas.
"""
#path must point to the heart.csv file.
data = pd.read_csv('/content/heart.csv', index_col=None, header=0)
data.head()
data.shape
"""---
# Data Cleaning
Before we start exploring our dataset, lets first do some simple checks to see if we need to do anything. Below, we shall check our datatypes, see if there are any missing values in each row, and check for any duplicate values.
"""
#Count missing values in each column.
data.isna().sum()
#duplicate values check
data.duplicated().sum()
data.loc[data.duplicated(keep=False),:]
"""As we can see, we are primarily working with "Int"-type values. This means we do not require any more work to change the features of the data. We also have no absent attributes in each row, but we have discovered duplicate values. All we need to do is just drop this value and continue forward."""
#drop duplicates
data.drop_duplicates(keep='first', inplace=True)
"""---
# Data Exploration
Let’s look at our data and see any noticeable correlations. The below table describes how our data is distributes per each column. Though, this doesnt represent much use since some of the attributes are numerical encodings and aren’t represented well like this.
In order to see correlations, lets visually interpret this data through a heatmap of correlated values, and a pair plot to draw visual inferences between columns.
"""
#Description of out dataset, rounded to two decimal places
data.describe().round(2)
data_corr = data.corr()
data_corr
plt.figure(figsize=(8,6))
sns.set_context('paper')
sns.heatmap(data_corr, annot=True, cmap='Blues', fmt='.0%')
"""From the above correlation plot we can see correlations between "output" and the following:
* cp (43%)
* thalachh (42%)
* slp (34%)
The above attributes represent the highest correlated attributes, thought there are some notable correlations listed below:
* age and caa, trtbps
* cp and thalachh
* chol and age
* thalachh and slp, cp
* exng and oldpeak
* oldpeak and caa
* thall and exnp, oldpeak, age
"""
#broad look at data distribution
sns.pairplot(data)
"""The below plots depict the density of data in accordance with what would be a high likely hood and a low likelihood of heart attack risk. This gives insight to the shape of the data given the output of events."""
X = data.drop('output',axis=1)
y = data['output']
riskyDF = data[y == 1]
safeDF = data[y == 0]
for col in data.select_dtypes(include=['float64','int64']):
plt.figure(figsize=(4,4))
sns.distplot(riskyDF[col],label='High Risk')
sns.distplot(safeDF[col],label='Low Risk')
plt.legend()
plt.show()
"""---
# Data Engineering/Modelling
Because the data is already in a numerical form (int-type), it will not be required to engineer the data or reencode values. Though, given the tasks ahead, we may require data scaling for input into specific classifier models.
We shall address this problem as we arrive to it. But for now, we can get to creating our train-test split.
"""
#train test splitting
y = data['output']
x = data.drop('output', axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state=42)
"""---
# Classifier Model 1: KNN
First let’s explore using K-Nearest Neighbour (KNN) algorithm. The KNN algorithm assumes that similar things exist in proximity. In other words, similar things are near to each other. KNN algorithm assumes the similarity between the new case/data and available cases and put the new case into the category that is most like the available categories.
For this algorithm to work we need to scale the test/train data. To do this, I shall generate a pipeline. This will allow scaling the train/test data without taking up additional resources or modifying the original test/train split, as well as keep the use of our train/test split uniform across the other two classification models.
"""
KNN_pipeline = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier())])
KNN_pipeline.fit(x_train, y_train)
KNN_pipeline.score(x_test, y_test)
y_proba = KNN_pipeline.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""Can we increase our accuracy of our KNN Classifier?! Let's find out below by running the pipeline through a for-loop, increasing the number of neighbours for selection, and plotting the accuracy out to see the change in value."""
err = []
for i in range(1, 40):
model = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = i))])
model.fit(x_train, y_train)
pred_i = model.predict(x_test)
err.append(np.mean(pred_i != y_test))
plt.figure(figsize =(10, 8))
plt.plot(range(1, 40), err, color ='blue',
linestyle ='dashed', marker ='o',
markerfacecolor ='blue', markersize = 8)
plt.title('Mean Err = f(K)')
plt.xlabel('K')
plt.ylabel('Mean Err')
"""The above output suggests that a value of "5" should be the most optimized value."""
KNN_pipeline_Opt = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = 5))])
KNN_pipeline_Opt.fit(x_train, y_train)
y_proba = KNN_pipeline_Opt.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""However, this seems to be negligible since the resultant accuracy has no increased any more than the first model.
---
# Classifier Model 2: Decision Tree Classification
A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
However for this use case, we are going to use it to classify attributes to create a process of decision to predict a patients possibility of a heart attack.
"""
dt = DecisionTreeClassifier(random_state=42)
dt = dt.fit(x_train, y_train)
dt.tree_.node_count, dt.tree_.max_depth
# The error on the training and test data sets (Taken from workbook)
y_train_pred = dt.predict(x_train)
y_test_pred = dt.predict(x_test)
def measure_error(y_true, y_pred, label):
return pd.Series({'accuracy':accuracy_score(y_true, y_pred),
'precision': precision_score(y_true, y_pred),
'recall': recall_score(y_true, y_pred),
'f1': f1_score(y_true, y_pred)},
name=label)
train_test_full_error = pd.concat([measure_error(y_train, y_train_pred, 'train'),
measure_error(y_test, y_test_pred, 'test')],
axis=1)
train_test_full_error
"""The above output shows out accuracy prediction. This is quite low, could it be improved with Grid Search Cross Validation (GSCV)?! Let's find out below:"""
# Grid Search optimization
param_grid = {'max_depth':range(1, dt.tree_.max_depth+1, 2), 'max_features': range(1, len(dt.feature_importances_)+1)}
GR = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid=param_grid, scoring='accuracy', n_jobs=-1)
GR = GR.fit(x_train, y_train)
GR.best_estimator_.tree_.node_count, GR.best_estimator_.tree_.max_depth
y_train_pred_gr = GR.predict(x_train)
y_test_pred_gr = GR.predict(x_test)
train_test_gr_error = pd.concat([measure_error(y_train, y_train_pred_gr, 'train'),
measure_error(y_test, y_test_pred_gr, 'test')],
axis=1)
train_test_gr_error
"""We have increased the accuracy by 0.01 but at the cost of our precision. Also, our recall has increased significantly. Let’s look at the two different trees:"""
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_1.png'
graph.write_png(filename)
Image(filename=filename)
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(GR.best_estimator_, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_2.png'
graph.write_png(filename)
Image(filename=filename)
### END SOLUTION
"""---
# Classifier Model 3: Random Forest Classification
Random forest is an ensemble machine learning algorithm. A forest is comprised of trees. It is said that the more trees it has, the more robust a forest is. Random forests create decision trees on randomly selected data samples, gets prediction from each tree and selects the best solution by means of voting. It also provides a pretty good indicator of the feature importance. Let’s see how well our heart attack data is classified by Random Forest.
"""
# Initialize the random forest estimator
# Note that the number of trees is not setup here
RF = RandomForestClassifier(oob_score=True, random_state=42, warm_start=True, n_jobs=-1)
oob_list = list()
# Iterate through all of the possibilities for
# number of trees
for n_trees in [15, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# Use this to set the number of trees
RF.set_par | f = pd.concat(oob_list, axis=1).T.set_index('n_trees')
ax = rf_oob_df.plot(legend=False, marker='x', figsize=(14, 7), linewidth=5)
ax.set(ylabel='out-of-bag error');
"""The key is to reduce our out-of-bag (OOB) error. We do this by increasing the number of possibilities and finding the possibility which produced the lowest OOB score. In this case, the lowest score is "700""""
from sklearn.metrics import f1_score, roc_auc_score
# Random forest with 700 estimators
model = RF.set_params(n_estimators=700)
y_pred = model.predict(x_test)
cr = classification_report(y_test, y_pred)
print(cr)
score_df = pd.DataFrame({'accuracy': accuracy_score(y_test, y_pred),
'precision': precision_score(y_test, y_pred),
'recall': recall_score(y_test, y_pred),
'f1': f1_score(y_test, y_pred),
'auc': roc_auc_score(y_test, y_pred)},
index=pd.Index([0]))
print(score_df)
"""Picking our most optimised number (700), we achieve an accuracy of 0.85 (rounded up). Let’s visually see how this is reflected in the ROC curve and precision/recall curve."""
sns.set_context('talk')
fig, axList = plt.subplots(ncols=2)
fig.set_size_inches(16, 8)
# Get the probabilities for each of the two categories
y_prob = model.predict_proba(x_test)
# Plot the ROC-AUC curve
ax = axList[0]
fpr, tpr, thresholds = roc_curve(y_test, y_prob[:,1])
ax.plot(fpr, tpr, linewidth=5)
# It is customary to draw a diagonal dotted line in ROC plots.
# This is to indicate completely random prediction. Deviation from this
# dotted line towards the upper left corner signifies the power of the model.
ax.plot([0, 1], [0, 1], ls='--', color='black', lw=.3)
ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='ROC curve')
ax.grid(True)
# Plot the precision-recall curve
ax = axList[1]
precision, recall, _ = precision_recall_curve(y_test, y_prob[:,1])
ax.plot(recall, precision, linewidth=5)
ax.set(xlabel='Recall', ylabel='Precision', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='Precision-Recall curve')
ax.grid(True)
plt.tight_layout()
"""Looking at the ROC curve, the true positive rate begins to severely fall off as the precision exceeds 8.0 resulting in a 60% hance if the true positive rate is 100% of a false positive rate.
Similarly, to the precision/recall curve. Anything above a 0.7 in the recall leads to a sharp drop off in the precision rate.
---
# Summary/Key Findings/Next Steps
To summarize, this task has explored Heart Attack data and the accuracy of three different classification models to measure predictability of a heart attack. We have explored:
* KNN (accuracy: 0.8571428571428571)
* Decision Tree's (accuracy: 0.835165 given the test data and after GSCV)
* Random Forests (accuracy: 0.846154)
Given the scores of accuracies, I beleive a KNN model is of best fit for the data. However, to accommodate for whats easier to demonstrate explainability of the model, I do feel that decision tree's best fit this case as there is an easy visual representation to the structure of the decision-making process.
Precision was a mixed batch, given if recall was a factor, Decision tree's with GSCV optimisation provided the highest Recall but compromises with precision. Ultimately you want a system with high precision and high recall which return many results, with all results labelled correctly.
""" | ams(n_estimators=n_trees)
# Fit the model
RF.fit(x_train, y_train)
# Get the oob error
oob_error = 1 - RF.oob_score_
# Store it
oob_list.append(pd.Series({'n_trees': n_trees, 'oob': oob_error}))
rf_oob_d | conditional_block |
supervised_ml_(classification)_assignment_(final).py | # -*- coding: utf-8 -*-
"""Supervised_ML_(Classification)_assignment_(final)
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dt_czoLEqYxIoCA-v7Ynu0XHCWHnFWsB
"""
#import of libraries
import pandas as pd
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report, roc_curve, precision_score, recall_score
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings('ignore')
"""---
# Supervised Machine Learning (Classification) on Heart Attack Data
The follow assignment will explore Heart Attack Data and explore 3 separate models to assess the accuracy of predicting heart attack. The dataset used in this task can be found at; https://www.kaggle.com/rashikrahmanpritom/heart-attack-analysis-prediction-dataset
Features:
* Age : Age of the patient
* Sex : Sex of the patient
* exang: exercise induced angina (1 = yes; 0 = no)
* ca: number of major vessels (0-3)
* cp : Chest Pain type chest pain type :
* cp Value 1: typical angina
* cp Value 2: atypical angina
* cp Value 3: non-anginal pain
* cp Value 4: asymptomatic
* trtbps : resting blood pressure (in mm Hg)
* chol : cholesterol in mg/dl fetched via BMI sensor
* fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
* rest_ecg : resting electrocardiographic results :
* Value 0: normal
* Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
* Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
* thalach : maximum heart rate achieved
* target : 0= less chance of heart attack 1= more chance of heart attack
Let's begin by importing our data into pandas.
"""
#path must point to the heart.csv file.
data = pd.read_csv('/content/heart.csv', index_col=None, header=0)
data.head()
data.shape
"""---
# Data Cleaning
Before we start exploring our dataset, lets first do some simple checks to see if we need to do anything. Below, we shall check our datatypes, see if there are any missing values in each row, and check for any duplicate values.
"""
#Count missing values in each column.
data.isna().sum()
#duplicate values check
data.duplicated().sum()
data.loc[data.duplicated(keep=False),:]
"""As we can see, we are primarily working with "Int"-type values. This means we do not require any more work to change the features of the data. We also have no absent attributes in each row, but we have discovered duplicate values. All we need to do is just drop this value and continue forward."""
#drop duplicates
data.drop_duplicates(keep='first', inplace=True)
"""---
# Data Exploration
Let’s look at our data and see any noticeable correlations. The below table describes how our data is distributes per each column. Though, this doesnt represent much use since some of the attributes are numerical encodings and aren’t represented well like this.
In order to see correlations, lets visually interpret this data through a heatmap of correlated values, and a pair plot to draw visual inferences between columns.
"""
#Description of out dataset, rounded to two decimal places
data.describe().round(2)
data_corr = data.corr()
data_corr
plt.figure(figsize=(8,6))
sns.set_context('paper')
sns.heatmap(data_corr, annot=True, cmap='Blues', fmt='.0%')
"""From the above correlation plot we can see correlations between "output" and the following:
* cp (43%)
* thalachh (42%)
* slp (34%)
The above attributes represent the highest correlated attributes, thought there are some notable correlations listed below:
* age and caa, trtbps
* cp and thalachh
* chol and age
* thalachh and slp, cp
* exng and oldpeak
* oldpeak and caa
* thall and exnp, oldpeak, age
"""
#broad look at data distribution
sns.pairplot(data)
"""The below plots depict the density of data in accordance with what would be a high likely hood and a low likelihood of heart attack risk. This gives insight to the shape of the data given the output of events."""
X = data.drop('output',axis=1)
y = data['output']
riskyDF = data[y == 1]
safeDF = data[y == 0]
for col in data.select_dtypes(include=['float64','int64']):
plt.figure(figsize=(4,4))
sns.distplot(riskyDF[col],label='High Risk')
sns.distplot(safeDF[col],label='Low Risk')
plt.legend() | plt.show()
"""---
# Data Engineering/Modelling
Because the data is already in a numerical form (int-type), it will not be required to engineer the data or reencode values. Though, given the tasks ahead, we may require data scaling for input into specific classifier models.
We shall address this problem as we arrive to it. But for now, we can get to creating our train-test split.
"""
#train test splitting
y = data['output']
x = data.drop('output', axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state=42)
"""---
# Classifier Model 1: KNN
First let’s explore using K-Nearest Neighbour (KNN) algorithm. The KNN algorithm assumes that similar things exist in proximity. In other words, similar things are near to each other. KNN algorithm assumes the similarity between the new case/data and available cases and put the new case into the category that is most like the available categories.
For this algorithm to work we need to scale the test/train data. To do this, I shall generate a pipeline. This will allow scaling the train/test data without taking up additional resources or modifying the original test/train split, as well as keep the use of our train/test split uniform across the other two classification models.
"""
KNN_pipeline = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier())])
KNN_pipeline.fit(x_train, y_train)
KNN_pipeline.score(x_test, y_test)
y_proba = KNN_pipeline.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""Can we increase our accuracy of our KNN Classifier?! Let's find out below by running the pipeline through a for-loop, increasing the number of neighbours for selection, and plotting the accuracy out to see the change in value."""
err = []
for i in range(1, 40):
model = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = i))])
model.fit(x_train, y_train)
pred_i = model.predict(x_test)
err.append(np.mean(pred_i != y_test))
plt.figure(figsize =(10, 8))
plt.plot(range(1, 40), err, color ='blue',
linestyle ='dashed', marker ='o',
markerfacecolor ='blue', markersize = 8)
plt.title('Mean Err = f(K)')
plt.xlabel('K')
plt.ylabel('Mean Err')
"""The above output suggests that a value of "5" should be the most optimized value."""
KNN_pipeline_Opt = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = 5))])
KNN_pipeline_Opt.fit(x_train, y_train)
y_proba = KNN_pipeline_Opt.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""However, this seems to be negligible since the resultant accuracy has no increased any more than the first model.
---
# Classifier Model 2: Decision Tree Classification
A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
However for this use case, we are going to use it to classify attributes to create a process of decision to predict a patients possibility of a heart attack.
"""
dt = DecisionTreeClassifier(random_state=42)
dt = dt.fit(x_train, y_train)
dt.tree_.node_count, dt.tree_.max_depth
# The error on the training and test data sets (Taken from workbook)
y_train_pred = dt.predict(x_train)
y_test_pred = dt.predict(x_test)
def measure_error(y_true, y_pred, label):
return pd.Series({'accuracy':accuracy_score(y_true, y_pred),
'precision': precision_score(y_true, y_pred),
'recall': recall_score(y_true, y_pred),
'f1': f1_score(y_true, y_pred)},
name=label)
train_test_full_error = pd.concat([measure_error(y_train, y_train_pred, 'train'),
measure_error(y_test, y_test_pred, 'test')],
axis=1)
train_test_full_error
"""The above output shows out accuracy prediction. This is quite low, could it be improved with Grid Search Cross Validation (GSCV)?! Let's find out below:"""
# Grid Search optimization
param_grid = {'max_depth':range(1, dt.tree_.max_depth+1, 2), 'max_features': range(1, len(dt.feature_importances_)+1)}
GR = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid=param_grid, scoring='accuracy', n_jobs=-1)
GR = GR.fit(x_train, y_train)
GR.best_estimator_.tree_.node_count, GR.best_estimator_.tree_.max_depth
y_train_pred_gr = GR.predict(x_train)
y_test_pred_gr = GR.predict(x_test)
train_test_gr_error = pd.concat([measure_error(y_train, y_train_pred_gr, 'train'),
measure_error(y_test, y_test_pred_gr, 'test')],
axis=1)
train_test_gr_error
"""We have increased the accuracy by 0.01 but at the cost of our precision. Also, our recall has increased significantly. Let’s look at the two different trees:"""
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_1.png'
graph.write_png(filename)
Image(filename=filename)
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(GR.best_estimator_, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_2.png'
graph.write_png(filename)
Image(filename=filename)
### END SOLUTION
"""---
# Classifier Model 3: Random Forest Classification
Random forest is an ensemble machine learning algorithm. A forest is comprised of trees. It is said that the more trees it has, the more robust a forest is. Random forests create decision trees on randomly selected data samples, gets prediction from each tree and selects the best solution by means of voting. It also provides a pretty good indicator of the feature importance. Let’s see how well our heart attack data is classified by Random Forest.
"""
# Initialize the random forest estimator
# Note that the number of trees is not setup here
RF = RandomForestClassifier(oob_score=True, random_state=42, warm_start=True, n_jobs=-1)
oob_list = list()
# Iterate through all of the possibilities for
# number of trees
for n_trees in [15, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# Use this to set the number of trees
RF.set_params(n_estimators=n_trees)
# Fit the model
RF.fit(x_train, y_train)
# Get the oob error
oob_error = 1 - RF.oob_score_
# Store it
oob_list.append(pd.Series({'n_trees': n_trees, 'oob': oob_error}))
rf_oob_df = pd.concat(oob_list, axis=1).T.set_index('n_trees')
ax = rf_oob_df.plot(legend=False, marker='x', figsize=(14, 7), linewidth=5)
ax.set(ylabel='out-of-bag error');
"""The key is to reduce our out-of-bag (OOB) error. We do this by increasing the number of possibilities and finding the possibility which produced the lowest OOB score. In this case, the lowest score is "700""""
from sklearn.metrics import f1_score, roc_auc_score
# Random forest with 700 estimators
model = RF.set_params(n_estimators=700)
y_pred = model.predict(x_test)
cr = classification_report(y_test, y_pred)
print(cr)
score_df = pd.DataFrame({'accuracy': accuracy_score(y_test, y_pred),
'precision': precision_score(y_test, y_pred),
'recall': recall_score(y_test, y_pred),
'f1': f1_score(y_test, y_pred),
'auc': roc_auc_score(y_test, y_pred)},
index=pd.Index([0]))
print(score_df)
"""Picking our most optimised number (700), we achieve an accuracy of 0.85 (rounded up). Let’s visually see how this is reflected in the ROC curve and precision/recall curve."""
sns.set_context('talk')
fig, axList = plt.subplots(ncols=2)
fig.set_size_inches(16, 8)
# Get the probabilities for each of the two categories
y_prob = model.predict_proba(x_test)
# Plot the ROC-AUC curve
ax = axList[0]
fpr, tpr, thresholds = roc_curve(y_test, y_prob[:,1])
ax.plot(fpr, tpr, linewidth=5)
# It is customary to draw a diagonal dotted line in ROC plots.
# This is to indicate completely random prediction. Deviation from this
# dotted line towards the upper left corner signifies the power of the model.
ax.plot([0, 1], [0, 1], ls='--', color='black', lw=.3)
ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='ROC curve')
ax.grid(True)
# Plot the precision-recall curve
ax = axList[1]
precision, recall, _ = precision_recall_curve(y_test, y_prob[:,1])
ax.plot(recall, precision, linewidth=5)
ax.set(xlabel='Recall', ylabel='Precision', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='Precision-Recall curve')
ax.grid(True)
plt.tight_layout()
"""Looking at the ROC curve, the true positive rate begins to severely fall off as the precision exceeds 8.0 resulting in a 60% hance if the true positive rate is 100% of a false positive rate.
Similarly, to the precision/recall curve. Anything above a 0.7 in the recall leads to a sharp drop off in the precision rate.
---
# Summary/Key Findings/Next Steps
To summarize, this task has explored Heart Attack data and the accuracy of three different classification models to measure predictability of a heart attack. We have explored:
* KNN (accuracy: 0.8571428571428571)
* Decision Tree's (accuracy: 0.835165 given the test data and after GSCV)
* Random Forests (accuracy: 0.846154)
Given the scores of accuracies, I beleive a KNN model is of best fit for the data. However, to accommodate for whats easier to demonstrate explainability of the model, I do feel that decision tree's best fit this case as there is an easy visual representation to the structure of the decision-making process.
Precision was a mixed batch, given if recall was a factor, Decision tree's with GSCV optimisation provided the highest Recall but compromises with precision. Ultimately you want a system with high precision and high recall which return many results, with all results labelled correctly.
""" | random_line_split | |
supervised_ml_(classification)_assignment_(final).py | # -*- coding: utf-8 -*-
"""Supervised_ML_(Classification)_assignment_(final)
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dt_czoLEqYxIoCA-v7Ynu0XHCWHnFWsB
"""
#import of libraries
import pandas as pd
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report, roc_curve, precision_score, recall_score
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings('ignore')
"""---
# Supervised Machine Learning (Classification) on Heart Attack Data
The follow assignment will explore Heart Attack Data and explore 3 separate models to assess the accuracy of predicting heart attack. The dataset used in this task can be found at; https://www.kaggle.com/rashikrahmanpritom/heart-attack-analysis-prediction-dataset
Features:
* Age : Age of the patient
* Sex : Sex of the patient
* exang: exercise induced angina (1 = yes; 0 = no)
* ca: number of major vessels (0-3)
* cp : Chest Pain type chest pain type :
* cp Value 1: typical angina
* cp Value 2: atypical angina
* cp Value 3: non-anginal pain
* cp Value 4: asymptomatic
* trtbps : resting blood pressure (in mm Hg)
* chol : cholesterol in mg/dl fetched via BMI sensor
* fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
* rest_ecg : resting electrocardiographic results :
* Value 0: normal
* Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
* Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
* thalach : maximum heart rate achieved
* target : 0= less chance of heart attack 1= more chance of heart attack
Let's begin by importing our data into pandas.
"""
#path must point to the heart.csv file.
data = pd.read_csv('/content/heart.csv', index_col=None, header=0)
data.head()
data.shape
"""---
# Data Cleaning
Before we start exploring our dataset, lets first do some simple checks to see if we need to do anything. Below, we shall check our datatypes, see if there are any missing values in each row, and check for any duplicate values.
"""
#Count missing values in each column.
data.isna().sum()
#duplicate values check
data.duplicated().sum()
data.loc[data.duplicated(keep=False),:]
"""As we can see, we are primarily working with "Int"-type values. This means we do not require any more work to change the features of the data. We also have no absent attributes in each row, but we have discovered duplicate values. All we need to do is just drop this value and continue forward."""
#drop duplicates
data.drop_duplicates(keep='first', inplace=True)
"""---
# Data Exploration
Let’s look at our data and see any noticeable correlations. The below table describes how our data is distributes per each column. Though, this doesnt represent much use since some of the attributes are numerical encodings and aren’t represented well like this.
In order to see correlations, lets visually interpret this data through a heatmap of correlated values, and a pair plot to draw visual inferences between columns.
"""
#Description of out dataset, rounded to two decimal places
data.describe().round(2)
data_corr = data.corr()
data_corr
plt.figure(figsize=(8,6))
sns.set_context('paper')
sns.heatmap(data_corr, annot=True, cmap='Blues', fmt='.0%')
"""From the above correlation plot we can see correlations between "output" and the following:
* cp (43%)
* thalachh (42%)
* slp (34%)
The above attributes represent the highest correlated attributes, thought there are some notable correlations listed below:
* age and caa, trtbps
* cp and thalachh
* chol and age
* thalachh and slp, cp
* exng and oldpeak
* oldpeak and caa
* thall and exnp, oldpeak, age
"""
#broad look at data distribution
sns.pairplot(data)
"""The below plots depict the density of data in accordance with what would be a high likely hood and a low likelihood of heart attack risk. This gives insight to the shape of the data given the output of events."""
X = data.drop('output',axis=1)
y = data['output']
riskyDF = data[y == 1]
safeDF = data[y == 0]
for col in data.select_dtypes(include=['float64','int64']):
plt.figure(figsize=(4,4))
sns.distplot(riskyDF[col],label='High Risk')
sns.distplot(safeDF[col],label='Low Risk')
plt.legend()
plt.show()
"""---
# Data Engineering/Modelling
Because the data is already in a numerical form (int-type), it will not be required to engineer the data or reencode values. Though, given the tasks ahead, we may require data scaling for input into specific classifier models.
We shall address this problem as we arrive to it. But for now, we can get to creating our train-test split.
"""
#train test splitting
y = data['output']
x = data.drop('output', axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state=42)
"""---
# Classifier Model 1: KNN
First let’s explore using K-Nearest Neighbour (KNN) algorithm. The KNN algorithm assumes that similar things exist in proximity. In other words, similar things are near to each other. KNN algorithm assumes the similarity between the new case/data and available cases and put the new case into the category that is most like the available categories.
For this algorithm to work we need to scale the test/train data. To do this, I shall generate a pipeline. This will allow scaling the train/test data without taking up additional resources or modifying the original test/train split, as well as keep the use of our train/test split uniform across the other two classification models.
"""
KNN_pipeline = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier())])
KNN_pipeline.fit(x_train, y_train)
KNN_pipeline.score(x_test, y_test)
y_proba = KNN_pipeline.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""Can we increase our accuracy of our KNN Classifier?! Let's find out below by running the pipeline through a for-loop, increasing the number of neighbours for selection, and plotting the accuracy out to see the change in value."""
err = []
for i in range(1, 40):
model = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = i))])
model.fit(x_train, y_train)
pred_i = model.predict(x_test)
err.append(np.mean(pred_i != y_test))
plt.figure(figsize =(10, 8))
plt.plot(range(1, 40), err, color ='blue',
linestyle ='dashed', marker ='o',
markerfacecolor ='blue', markersize = 8)
plt.title('Mean Err = f(K)')
plt.xlabel('K')
plt.ylabel('Mean Err')
"""The above output suggests that a value of "5" should be the most optimized value."""
KNN_pipeline_Opt = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = 5))])
KNN_pipeline_Opt.fit(x_train, y_train)
y_proba = KNN_pipeline_Opt.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""However, this seems to be negligible since the resultant accuracy has no increased any more than the first model.
---
# Classifier Model 2: Decision Tree Classification
A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
However for this use case, we are going to use it to classify attributes to create a process of decision to predict a patients possibility of a heart attack.
"""
dt = DecisionTreeClassifier(random_state=42)
dt = dt.fit(x_train, y_train)
dt.tree_.node_count, dt.tree_.max_depth
# The error on the training and test data sets (Taken from workbook)
y_train_pred = dt.predict(x_train)
y_test_pred = dt.predict(x_test)
def measur | e, y_pred, label):
return pd.Series({'accuracy':accuracy_score(y_true, y_pred),
'precision': precision_score(y_true, y_pred),
'recall': recall_score(y_true, y_pred),
'f1': f1_score(y_true, y_pred)},
name=label)
train_test_full_error = pd.concat([measure_error(y_train, y_train_pred, 'train'),
measure_error(y_test, y_test_pred, 'test')],
axis=1)
train_test_full_error
"""The above output shows out accuracy prediction. This is quite low, could it be improved with Grid Search Cross Validation (GSCV)?! Let's find out below:"""
# Grid Search optimization
param_grid = {'max_depth':range(1, dt.tree_.max_depth+1, 2), 'max_features': range(1, len(dt.feature_importances_)+1)}
GR = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid=param_grid, scoring='accuracy', n_jobs=-1)
GR = GR.fit(x_train, y_train)
GR.best_estimator_.tree_.node_count, GR.best_estimator_.tree_.max_depth
y_train_pred_gr = GR.predict(x_train)
y_test_pred_gr = GR.predict(x_test)
train_test_gr_error = pd.concat([measure_error(y_train, y_train_pred_gr, 'train'),
measure_error(y_test, y_test_pred_gr, 'test')],
axis=1)
train_test_gr_error
"""We have increased the accuracy by 0.01 but at the cost of our precision. Also, our recall has increased significantly. Let’s look at the two different trees:"""
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_1.png'
graph.write_png(filename)
Image(filename=filename)
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(GR.best_estimator_, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_2.png'
graph.write_png(filename)
Image(filename=filename)
### END SOLUTION
"""---
# Classifier Model 3: Random Forest Classification
Random forest is an ensemble machine learning algorithm. A forest is comprised of trees. It is said that the more trees it has, the more robust a forest is. Random forests create decision trees on randomly selected data samples, gets prediction from each tree and selects the best solution by means of voting. It also provides a pretty good indicator of the feature importance. Let’s see how well our heart attack data is classified by Random Forest.
"""
# Initialize the random forest estimator
# Note that the number of trees is not setup here
RF = RandomForestClassifier(oob_score=True, random_state=42, warm_start=True, n_jobs=-1)
oob_list = list()
# Iterate through all of the possibilities for
# number of trees
for n_trees in [15, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# Use this to set the number of trees
RF.set_params(n_estimators=n_trees)
# Fit the model
RF.fit(x_train, y_train)
# Get the oob error
oob_error = 1 - RF.oob_score_
# Store it
oob_list.append(pd.Series({'n_trees': n_trees, 'oob': oob_error}))
rf_oob_df = pd.concat(oob_list, axis=1).T.set_index('n_trees')
ax = rf_oob_df.plot(legend=False, marker='x', figsize=(14, 7), linewidth=5)
ax.set(ylabel='out-of-bag error');
"""The key is to reduce our out-of-bag (OOB) error. We do this by increasing the number of possibilities and finding the possibility which produced the lowest OOB score. In this case, the lowest score is "700""""
from sklearn.metrics import f1_score, roc_auc_score
# Random forest with 700 estimators
model = RF.set_params(n_estimators=700)
y_pred = model.predict(x_test)
cr = classification_report(y_test, y_pred)
print(cr)
score_df = pd.DataFrame({'accuracy': accuracy_score(y_test, y_pred),
'precision': precision_score(y_test, y_pred),
'recall': recall_score(y_test, y_pred),
'f1': f1_score(y_test, y_pred),
'auc': roc_auc_score(y_test, y_pred)},
index=pd.Index([0]))
print(score_df)
"""Picking our most optimised number (700), we achieve an accuracy of 0.85 (rounded up). Let’s visually see how this is reflected in the ROC curve and precision/recall curve."""
sns.set_context('talk')
fig, axList = plt.subplots(ncols=2)
fig.set_size_inches(16, 8)
# Get the probabilities for each of the two categories
y_prob = model.predict_proba(x_test)
# Plot the ROC-AUC curve
ax = axList[0]
fpr, tpr, thresholds = roc_curve(y_test, y_prob[:,1])
ax.plot(fpr, tpr, linewidth=5)
# It is customary to draw a diagonal dotted line in ROC plots.
# This is to indicate completely random prediction. Deviation from this
# dotted line towards the upper left corner signifies the power of the model.
ax.plot([0, 1], [0, 1], ls='--', color='black', lw=.3)
ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='ROC curve')
ax.grid(True)
# Plot the precision-recall curve
ax = axList[1]
precision, recall, _ = precision_recall_curve(y_test, y_prob[:,1])
ax.plot(recall, precision, linewidth=5)
ax.set(xlabel='Recall', ylabel='Precision', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='Precision-Recall curve')
ax.grid(True)
plt.tight_layout()
"""Looking at the ROC curve, the true positive rate begins to severely fall off as the precision exceeds 8.0 resulting in a 60% hance if the true positive rate is 100% of a false positive rate.
Similarly, to the precision/recall curve. Anything above a 0.7 in the recall leads to a sharp drop off in the precision rate.
---
# Summary/Key Findings/Next Steps
To summarize, this task has explored Heart Attack data and the accuracy of three different classification models to measure predictability of a heart attack. We have explored:
* KNN (accuracy: 0.8571428571428571)
* Decision Tree's (accuracy: 0.835165 given the test data and after GSCV)
* Random Forests (accuracy: 0.846154)
Given the scores of accuracies, I beleive a KNN model is of best fit for the data. However, to accommodate for whats easier to demonstrate explainability of the model, I do feel that decision tree's best fit this case as there is an easy visual representation to the structure of the decision-making process.
Precision was a mixed batch, given if recall was a factor, Decision tree's with GSCV optimisation provided the highest Recall but compromises with precision. Ultimately you want a system with high precision and high recall which return many results, with all results labelled correctly.
""" | e_error(y_tru | identifier_name |
supervised_ml_(classification)_assignment_(final).py | # -*- coding: utf-8 -*-
"""Supervised_ML_(Classification)_assignment_(final)
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1dt_czoLEqYxIoCA-v7Ynu0XHCWHnFWsB
"""
#import of libraries
import pandas as pd
import glob
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import RobustScaler, StandardScaler, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, classification_report, roc_curve, precision_score, recall_score
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings('ignore')
"""---
# Supervised Machine Learning (Classification) on Heart Attack Data
The follow assignment will explore Heart Attack Data and explore 3 separate models to assess the accuracy of predicting heart attack. The dataset used in this task can be found at; https://www.kaggle.com/rashikrahmanpritom/heart-attack-analysis-prediction-dataset
Features:
* Age : Age of the patient
* Sex : Sex of the patient
* exang: exercise induced angina (1 = yes; 0 = no)
* ca: number of major vessels (0-3)
* cp : Chest Pain type chest pain type :
* cp Value 1: typical angina
* cp Value 2: atypical angina
* cp Value 3: non-anginal pain
* cp Value 4: asymptomatic
* trtbps : resting blood pressure (in mm Hg)
* chol : cholesterol in mg/dl fetched via BMI sensor
* fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
* rest_ecg : resting electrocardiographic results :
* Value 0: normal
* Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
* Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
* thalach : maximum heart rate achieved
* target : 0= less chance of heart attack 1= more chance of heart attack
Let's begin by importing our data into pandas.
"""
#path must point to the heart.csv file.
data = pd.read_csv('/content/heart.csv', index_col=None, header=0)
data.head()
data.shape
"""---
# Data Cleaning
Before we start exploring our dataset, lets first do some simple checks to see if we need to do anything. Below, we shall check our datatypes, see if there are any missing values in each row, and check for any duplicate values.
"""
#Count missing values in each column.
data.isna().sum()
#duplicate values check
data.duplicated().sum()
data.loc[data.duplicated(keep=False),:]
"""As we can see, we are primarily working with "Int"-type values. This means we do not require any more work to change the features of the data. We also have no absent attributes in each row, but we have discovered duplicate values. All we need to do is just drop this value and continue forward."""
#drop duplicates
data.drop_duplicates(keep='first', inplace=True)
"""---
# Data Exploration
Let’s look at our data and see any noticeable correlations. The below table describes how our data is distributes per each column. Though, this doesnt represent much use since some of the attributes are numerical encodings and aren’t represented well like this.
In order to see correlations, lets visually interpret this data through a heatmap of correlated values, and a pair plot to draw visual inferences between columns.
"""
#Description of out dataset, rounded to two decimal places
data.describe().round(2)
data_corr = data.corr()
data_corr
plt.figure(figsize=(8,6))
sns.set_context('paper')
sns.heatmap(data_corr, annot=True, cmap='Blues', fmt='.0%')
"""From the above correlation plot we can see correlations between "output" and the following:
* cp (43%)
* thalachh (42%)
* slp (34%)
The above attributes represent the highest correlated attributes, thought there are some notable correlations listed below:
* age and caa, trtbps
* cp and thalachh
* chol and age
* thalachh and slp, cp
* exng and oldpeak
* oldpeak and caa
* thall and exnp, oldpeak, age
"""
#broad look at data distribution
sns.pairplot(data)
"""The below plots depict the density of data in accordance with what would be a high likely hood and a low likelihood of heart attack risk. This gives insight to the shape of the data given the output of events."""
X = data.drop('output',axis=1)
y = data['output']
riskyDF = data[y == 1]
safeDF = data[y == 0]
for col in data.select_dtypes(include=['float64','int64']):
plt.figure(figsize=(4,4))
sns.distplot(riskyDF[col],label='High Risk')
sns.distplot(safeDF[col],label='Low Risk')
plt.legend()
plt.show()
"""---
# Data Engineering/Modelling
Because the data is already in a numerical form (int-type), it will not be required to engineer the data or reencode values. Though, given the tasks ahead, we may require data scaling for input into specific classifier models.
We shall address this problem as we arrive to it. But for now, we can get to creating our train-test split.
"""
#train test splitting
y = data['output']
x = data.drop('output', axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state=42)
"""---
# Classifier Model 1: KNN
First let’s explore using K-Nearest Neighbour (KNN) algorithm. The KNN algorithm assumes that similar things exist in proximity. In other words, similar things are near to each other. KNN algorithm assumes the similarity between the new case/data and available cases and put the new case into the category that is most like the available categories.
For this algorithm to work we need to scale the test/train data. To do this, I shall generate a pipeline. This will allow scaling the train/test data without taking up additional resources or modifying the original test/train split, as well as keep the use of our train/test split uniform across the other two classification models.
"""
KNN_pipeline = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier())])
KNN_pipeline.fit(x_train, y_train)
KNN_pipeline.score(x_test, y_test)
y_proba = KNN_pipeline.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""Can we increase our accuracy of our KNN Classifier?! Let's find out below by running the pipeline through a for-loop, increasing the number of neighbours for selection, and plotting the accuracy out to see the change in value."""
err = []
for i in range(1, 40):
model = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = i))])
model.fit(x_train, y_train)
pred_i = model.predict(x_test)
err.append(np.mean(pred_i != y_test))
plt.figure(figsize =(10, 8))
plt.plot(range(1, 40), err, color ='blue',
linestyle ='dashed', marker ='o',
markerfacecolor ='blue', markersize = 8)
plt.title('Mean Err = f(K)')
plt.xlabel('K')
plt.ylabel('Mean Err')
"""The above output suggests that a value of "5" should be the most optimized value."""
KNN_pipeline_Opt = Pipeline([('ss', StandardScaler()), ('knn', KNeighborsClassifier(n_neighbors = 5))])
KNN_pipeline_Opt.fit(x_train, y_train)
y_proba = KNN_pipeline_Opt.predict_proba(x_test)
y_pred = np.argmax(y_proba,axis=1)
print("KNN : ", accuracy_score(y_test, y_pred))
"""However, this seems to be negligible since the resultant accuracy has no increased any more than the first model.
---
# Classifier Model 2: Decision Tree Classification
A decision tree is a decision support tool that uses a tree-like model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
However for this use case, we are going to use it to classify attributes to create a process of decision to predict a patients possibility of a heart attack.
"""
dt = DecisionTreeClassifier(random_state=42)
dt = dt.fit(x_train, y_train)
dt.tree_.node_count, dt.tree_.max_depth
# The error on the training and test data sets (Taken from workbook)
y_train_pred = dt.predict(x_train)
y_test_pred = dt.predict(x_test)
def measure_error(y_true, y_pred, label):
return | n_test_full_error = pd.concat([measure_error(y_train, y_train_pred, 'train'),
measure_error(y_test, y_test_pred, 'test')],
axis=1)
train_test_full_error
"""The above output shows out accuracy prediction. This is quite low, could it be improved with Grid Search Cross Validation (GSCV)?! Let's find out below:"""
# Grid Search optimization
param_grid = {'max_depth':range(1, dt.tree_.max_depth+1, 2), 'max_features': range(1, len(dt.feature_importances_)+1)}
GR = GridSearchCV(DecisionTreeClassifier(random_state=42), param_grid=param_grid, scoring='accuracy', n_jobs=-1)
GR = GR.fit(x_train, y_train)
GR.best_estimator_.tree_.node_count, GR.best_estimator_.tree_.max_depth
y_train_pred_gr = GR.predict(x_train)
y_test_pred_gr = GR.predict(x_test)
train_test_gr_error = pd.concat([measure_error(y_train, y_train_pred_gr, 'train'),
measure_error(y_test, y_test_pred_gr, 'test')],
axis=1)
train_test_gr_error
"""We have increased the accuracy by 0.01 but at the cost of our precision. Also, our recall has increased significantly. Let’s look at the two different trees:"""
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_1.png'
graph.write_png(filename)
Image(filename=filename)
# Create an output destination for the file
dot_data = StringIO()
export_graphviz(GR.best_estimator_, out_file=dot_data, filled=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# View the tree image
filename = 'tree_2.png'
graph.write_png(filename)
Image(filename=filename)
### END SOLUTION
"""---
# Classifier Model 3: Random Forest Classification
Random forest is an ensemble machine learning algorithm. A forest is comprised of trees. It is said that the more trees it has, the more robust a forest is. Random forests create decision trees on randomly selected data samples, gets prediction from each tree and selects the best solution by means of voting. It also provides a pretty good indicator of the feature importance. Let’s see how well our heart attack data is classified by Random Forest.
"""
# Initialize the random forest estimator
# Note that the number of trees is not setup here
RF = RandomForestClassifier(oob_score=True, random_state=42, warm_start=True, n_jobs=-1)
oob_list = list()
# Iterate through all of the possibilities for
# number of trees
for n_trees in [15, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500, 600, 700, 800, 900, 1000]:
# Use this to set the number of trees
RF.set_params(n_estimators=n_trees)
# Fit the model
RF.fit(x_train, y_train)
# Get the oob error
oob_error = 1 - RF.oob_score_
# Store it
oob_list.append(pd.Series({'n_trees': n_trees, 'oob': oob_error}))
rf_oob_df = pd.concat(oob_list, axis=1).T.set_index('n_trees')
ax = rf_oob_df.plot(legend=False, marker='x', figsize=(14, 7), linewidth=5)
ax.set(ylabel='out-of-bag error');
"""The key is to reduce our out-of-bag (OOB) error. We do this by increasing the number of possibilities and finding the possibility which produced the lowest OOB score. In this case, the lowest score is "700""""
from sklearn.metrics import f1_score, roc_auc_score
# Random forest with 700 estimators
model = RF.set_params(n_estimators=700)
y_pred = model.predict(x_test)
cr = classification_report(y_test, y_pred)
print(cr)
score_df = pd.DataFrame({'accuracy': accuracy_score(y_test, y_pred),
'precision': precision_score(y_test, y_pred),
'recall': recall_score(y_test, y_pred),
'f1': f1_score(y_test, y_pred),
'auc': roc_auc_score(y_test, y_pred)},
index=pd.Index([0]))
print(score_df)
"""Picking our most optimised number (700), we achieve an accuracy of 0.85 (rounded up). Let’s visually see how this is reflected in the ROC curve and precision/recall curve."""
sns.set_context('talk')
fig, axList = plt.subplots(ncols=2)
fig.set_size_inches(16, 8)
# Get the probabilities for each of the two categories
y_prob = model.predict_proba(x_test)
# Plot the ROC-AUC curve
ax = axList[0]
fpr, tpr, thresholds = roc_curve(y_test, y_prob[:,1])
ax.plot(fpr, tpr, linewidth=5)
# It is customary to draw a diagonal dotted line in ROC plots.
# This is to indicate completely random prediction. Deviation from this
# dotted line towards the upper left corner signifies the power of the model.
ax.plot([0, 1], [0, 1], ls='--', color='black', lw=.3)
ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='ROC curve')
ax.grid(True)
# Plot the precision-recall curve
ax = axList[1]
precision, recall, _ = precision_recall_curve(y_test, y_prob[:,1])
ax.plot(recall, precision, linewidth=5)
ax.set(xlabel='Recall', ylabel='Precision', xlim=[-.01, 1.01], ylim=[-.01, 1.01], title='Precision-Recall curve')
ax.grid(True)
plt.tight_layout()
"""Looking at the ROC curve, the true positive rate begins to severely fall off as the precision exceeds 8.0 resulting in a 60% hance if the true positive rate is 100% of a false positive rate.
Similarly, to the precision/recall curve. Anything above a 0.7 in the recall leads to a sharp drop off in the precision rate.
---
# Summary/Key Findings/Next Steps
To summarize, this task has explored Heart Attack data and the accuracy of three different classification models to measure predictability of a heart attack. We have explored:
* KNN (accuracy: 0.8571428571428571)
* Decision Tree's (accuracy: 0.835165 given the test data and after GSCV)
* Random Forests (accuracy: 0.846154)
Given the scores of accuracies, I beleive a KNN model is of best fit for the data. However, to accommodate for whats easier to demonstrate explainability of the model, I do feel that decision tree's best fit this case as there is an easy visual representation to the structure of the decision-making process.
Precision was a mixed batch, given if recall was a factor, Decision tree's with GSCV optimisation provided the highest Recall but compromises with precision. Ultimately you want a system with high precision and high recall which return many results, with all results labelled correctly.
""" | pd.Series({'accuracy':accuracy_score(y_true, y_pred),
'precision': precision_score(y_true, y_pred),
'recall': recall_score(y_true, y_pred),
'f1': f1_score(y_true, y_pred)},
name=label)
trai | identifier_body |
tls.go | package main
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"sync"
"time"
"github.com/open-ch/ja3"
"go.starlark.net/starlark"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/net/http2"
)
// Intercept TLS (HTTPS) connections.
// loadCertificate loads the TLS certificate specified by certFile and keyFile
// into tlsCert.
func (c *config) loadCertificate() {
if c.CertFile != "" && c.KeyFile != "" {
cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
if err != nil {
log.Println("Error loading TLS certificate:", err)
return
}
c.TLSCert = cert
parsed, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
log.Println("Error parsing X509 certificate:", err)
return
}
c.ParsedTLSCert = parsed
c.TLSReady = true
c.ServeMux.HandleFunc("/cert.der", func(w http.ResponseWriter, r *http.Request) {
tlsCert := c.TLSCert
w.Header().Set("Content-Type", "application/x-x509-ca-cert")
w.Write(tlsCert.Certificate[len(tlsCert.Certificate)-1])
})
}
}
// connectDirect connects to serverAddr and copies data between it and conn.
// extraData is sent to the server first.
func connectDirect(conn net.Conn, serverAddr string, extraData []byte, dialer *net.Dialer) (uploaded, downloaded int64) {
activeConnections.Add(1)
defer activeConnections.Done()
serverConn, err := dialer.Dial("tcp", serverAddr)
if err != nil {
log.Printf("error with pass-through of SSL connection to %s: %s", serverAddr, err)
conn.Close()
return
}
if extraData != nil {
// There may also be data waiting in the socket's input buffer;
// read it before we send the data on, so that the first packet of
// the connection doesn't get split in two.
conn.SetReadDeadline(time.Now().Add(time.Millisecond))
buf := make([]byte, 2000)
n, _ := conn.Read(buf)
conn.SetReadDeadline(time.Time{})
if n > 0 {
extraData = append(extraData, buf[:n]...)
}
serverConn.Write(extraData)
}
ulChan := make(chan int64)
go func() {
n, _ := io.Copy(conn, serverConn)
time.Sleep(time.Second)
conn.Close()
ulChan <- n + int64(len(extraData))
}()
downloaded, _ = io.Copy(serverConn, conn)
serverConn.Close()
uploaded = <-ulChan
return uploaded, downloaded
}
type tlsFingerprintKey struct{}
// SSLBump performs a man-in-the-middle attack on conn, to filter the HTTPS
// traffic. serverAddr is the address (host:port) of the server the client was
// trying to connect to. user is the username to use for logging; authUser is
// the authenticated user, if any; r is the CONNECT request, if any.
func SSLBump(conn net.Conn, serverAddr, user, authUser string, r *http.Request) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, 4096)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("SSLBump: panic serving connection to %s: %v\n%s", serverAddr, err, buf)
conn.Close()
}
}()
session := &TLSSession{
ServerAddr: serverAddr,
User: authUser,
}
if r != nil {
session.ConnectHeader = r.Header
}
client := conn.RemoteAddr().String()
if host, _, err := net.SplitHostPort(client); err == nil {
session.ClientIP = host
} else {
session.ClientIP = client
}
obsoleteVersion := false
invalidSSL := false
// Read the client hello so that we can find out the name of the server (not
// just the address).
clientHello, err := readClientHello(conn)
if err != nil {
logTLS(user, serverAddr, "", fmt.Errorf("error reading client hello: %v", err), false, "")
if _, ok := err.(net.Error); ok {
conn.Close()
return
} else if err == ErrObsoleteSSLVersion {
obsoleteVersion = true
if getConfig().BlockObsoleteSSL {
conn.Close()
return
}
} else if err == ErrInvalidSSL {
invalidSSL = true
} else {
conn.Close()
return
}
}
clientHelloInfo, err := parseClientHello(clientHello)
host, port, err := net.SplitHostPort(serverAddr)
if err != nil {
host = serverAddr
port = "443"
}
serverName := ""
if !obsoleteVersion && !invalidSSL {
if clientHelloInfo != nil && clientHelloInfo.ServerName != "" {
serverName = clientHelloInfo.ServerName
}
}
session.SNI = serverName
if session.ServerAddr == "" {
session.ServerAddr = net.JoinHostPort(serverName, "443")
}
if serverName == "" {
serverName = host
if ip := net.ParseIP(serverName); ip != nil {
// All we have is an IP address, not a name from a CONNECT request.
// See if we can do better by reverse DNS.
names, err := net.LookupAddr(serverName)
if err == nil && len(names) > 0 {
serverName = strings.TrimSuffix(names[0], ".")
}
}
}
if serverName == "" {
logTLS(user, "", "", errors.New("no SNI available"), false, "")
conn.Close()
return
}
// Filter a virtual CONNECT request.
cr := &http.Request{
Method: "CONNECT",
Header: make(http.Header),
Host: net.JoinHostPort(serverName, port),
URL: &url.URL{Host: serverName},
RemoteAddr: conn.RemoteAddr().String(),
}
var tlsFingerprint string
j, err := ja3.ComputeJA3FromSegment(clientHello)
if err != nil {
log.Printf("Error generating TLS fingerprint: %v", err)
} else {
tlsFingerprint = j.GetJA3Hash()
ctx := cr.Context()
ctx = context.WithValue(ctx, tlsFingerprintKey{}, tlsFingerprint)
cr = cr.WithContext(ctx)
}
var tally map[rule]int
var scores map[string]int
var reqACLs map[string]bool
{
conf := getConfig()
tally = conf.URLRules.MatchingRules(cr.URL)
scores = conf.categoryScores(tally)
reqACLs = conf.ACLs.requestACLs(cr, authUser)
if invalidSSL {
reqACLs["invalid-ssl"] = true
}
if r == nil {
// It's a transparently-intercepted request instead of a real
// CONNECT request.
reqACLs["transparent"] = true
}
}
session.ACLs.data = reqACLs
session.Scores.data = scores
session.PossibleActions = []string{"allow", "block"}
if getConfig().TLSReady && !obsoleteVersion && !invalidSSL |
callStarlarkFunctions("ssl_bump", session)
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
if session.SourceIP != nil {
dialer.LocalAddr = &net.TCPAddr{
IP: session.SourceIP,
}
}
session.chooseAction()
logAccess(cr, nil, 0, false, user, tally, scores, session.Action, "", session.Ignored, nil)
switch session.Action.Action {
case "allow", "":
upload, download := connectDirect(conn, session.ServerAddr, clientHello, dialer)
logAccess(cr, nil, upload+download, false, user, tally, scores, session.Action, "", session.Ignored, nil)
return
case "block":
conn.Close()
return
}
var cert tls.Certificate
var rt http.RoundTripper
var http2Support bool
closeChan := make(chan struct{})
server := &http.Server{
IdleTimeout: getConfig().CloseIdleConnections,
ConnState: func(conn net.Conn, state http.ConnState) {
switch state {
case http.StateClosed:
close(closeChan)
}
},
}
serverConnConfig := &tls.Config{
ServerName: session.SNI,
InsecureSkipVerify: true,
}
clientSupportsHTTP2 := false
if clientHelloInfo != nil {
for _, p := range clientHelloInfo.SupportedProtos {
if p == "h2" {
clientSupportsHTTP2 = true
}
}
}
if clientSupportsHTTP2 && getConfig().HTTP2Upstream {
serverConnConfig.NextProtos = []string{"h2", "http/1.1"}
}
serverConn, err := tls.DialWithDialer(dialer, "tcp", session.ServerAddr, serverConnConfig)
if err == nil {
defer serverConn.Close()
state := serverConn.ConnectionState()
serverCert := state.PeerCertificates[0]
valid := validCert(serverCert, state.PeerCertificates[1:])
cert, err = imitateCertificate(serverCert, !valid, session.SNI)
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
connectDirect(conn, session.ServerAddr, clientHello, dialer)
return
}
http2Support = state.NegotiatedProtocol == "h2" && state.NegotiatedProtocolIsMutual
d := &tls.Dialer{
NetDialer: dialer,
Config: &tls.Config{
ServerName: session.SNI,
RootCAs: certPoolWith(serverConn.ConnectionState().PeerCertificates),
},
}
if !valid {
d.Config.InsecureSkipVerify = true
originalCert := serverConn.ConnectionState().PeerCertificates[0]
d.Config.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
cert, err := x509.ParseCertificate(rawCerts[0])
if err != nil {
return err
}
if cert.Equal(originalCert) {
return nil
}
return errCertMismatch
}
}
if http2Support {
d.Config.NextProtos = []string{"h2"}
var once sync.Once
rt = &http2.Transport{
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
var c net.Conn
once.Do(func() {
c = serverConn
})
if c != nil {
return c, nil
}
logVerbose("redial", "Redialing HTTP/2 connection to %s (%s)", session.SNI, session.ServerAddr)
return d.Dial("tcp", session.ServerAddr)
},
TLSClientConfig: d.Config,
StrictMaxConcurrentStreams: true,
}
} else {
rt = &connTransport{
Conn: serverConn,
Redial: func(ctx context.Context) (net.Conn, error) {
logVerbose("redial", "Redialing connection to %s (%s)", session.SNI, session.ServerAddr)
return d.DialContext(ctx, "tcp", session.ServerAddr)
},
}
}
} else {
cert, err = fakeCertificate(session.SNI)
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
conn.Close()
return
}
rt = httpTransport
}
session.Freeze()
server.Handler = &proxyHandler{
TLS: true,
tlsFingerprint: tlsFingerprint,
connectPort: port,
user: authUser,
rt: rt,
session: session,
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{cert, getConfig().TLSCert},
PreferServerCipherSuites: true,
CurvePreferences: []tls.CurveID{
tls.CurveP256,
tls.X25519, // Go 1.8 only
},
}
http2Downstream := getConfig().HTTP2Downstream && http2Support
if http2Downstream {
tlsConfig.NextProtos = []string{"h2", "http/1.1"}
}
tlsConn := tls.Server(&insertingConn{conn, clientHello}, tlsConfig)
err = tlsConn.Handshake()
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error in handshake with client: %v", err), false, tlsFingerprint)
conn.Close()
return
}
logTLS(user, session.ServerAddr, serverName, nil, false, tlsFingerprint)
if http2Downstream {
http2.ConfigureServer(server, nil)
}
listener := &singleListener{conn: tlsConn}
server.Serve(listener)
// Wait for the connection to finish.
<-closeChan
}
// A TLSSession is the parameter for the Starlark ssl_bump function.
type TLSSession struct {
	// SNI is the server name from the TLS ClientHello, if any.
	SNI string
	// ServerAddr is the host:port the client was trying to connect to.
	ServerAddr string
	// User is the authenticated username, if any.
	User string
	// ClientIP is the IP address of the client connection.
	ClientIP string
	// SourceIP is the IP address of the network interface to be used to dial
	// the upstream connection.
	SourceIP net.IP
	// ConnectHeader is the header from the CONNECT request, if any.
	ConnectHeader http.Header
	scoresAndACLs
	// frozen is set by Freeze; after that, fields may no longer be assigned
	// from Starlark.
	frozen bool
	// misc holds arbitrary data stored by Starlark scripts.
	misc SyncDict
}
// scoresAndACLs collects the filtering state for a request or TLS session:
// which ACLs matched, the category scores, and the action chosen.
type scoresAndACLs struct {
	ACLs            StringSet
	Scores          StringIntDict
	Tally           map[rule]int
	PossibleActions []string
	Action          ACLActionRule
	Ignored         []string
}

// currentAction returns the action that currently applies, along with the
// list of actions that were ignored when choosing it. An action that was
// already selected (e.g. assigned from a Starlark script) takes precedence;
// otherwise it is chosen from the ACL configuration, defaulting to "allow".
func (s *scoresAndACLs) currentAction() (ACLActionRule, []string) {
	if s.Action.Action != "" {
		return s.Action, s.Ignored
	}
	conf := getConfig()
	chosen, skipped := conf.ChooseACLCategoryAction(s.ACLs.data, s.Scores.data, conf.Threshold, s.PossibleActions...)
	if chosen.Action == "" {
		chosen.Action = "allow"
	}
	return chosen, skipped
}

// chooseAction evaluates currentAction and stores the result.
func (s *scoresAndACLs) chooseAction() {
	chosen, skipped := s.currentAction()
	s.Action, s.Ignored = chosen, skipped
}

// setAction sets the action explicitly (attributed to "starlark"), or returns
// an error if newAction is not one of PossibleActions.
func (s *scoresAndACLs) setAction(newAction string) error {
	for _, candidate := range s.PossibleActions {
		if candidate != newAction {
			continue
		}
		s.Action = ACLActionRule{
			Action: newAction,
			Needed: []string{"starlark"},
		}
		return nil
	}
	return fmt.Errorf("can't set action to %q; expected one of %q", newAction, s.PossibleActions)
}
// String implements starlark.Value.
func (s *TLSSession) String() string {
	return fmt.Sprintf("TLSSession(%q, %q)", s.SNI, s.ServerAddr)
}

// Type implements starlark.Value.
func (s *TLSSession) Type() string {
	return "TLSSession"
}

// Freeze implements starlark.Value; it makes the session (and its ACL and
// score collections) immutable from Starlark.
func (s *TLSSession) Freeze() {
	if !s.frozen {
		s.frozen = true
		s.ACLs.Freeze()
		s.Scores.Freeze()
	}
}

// Truth implements starlark.Value; a session is always truthy.
func (s *TLSSession) Truth() starlark.Bool {
	return starlark.True
}

// Hash implements starlark.Value; sessions are mutable and so unhashable.
func (s *TLSSession) Hash() (uint32, error) {
	return 0, errors.New("unhashable type: TLSSession")
}

// tlsSessionAttrNames lists the attributes exposed to Starlark via Attr.
var tlsSessionAttrNames = []string{"sni", "server_addr", "user", "client_ip", "acls", "scores", "source_ip", "action", "possible_actions", "header", "misc"}

// AttrNames implements starlark.HasAttrs.
func (s *TLSSession) AttrNames() []string {
	return tlsSessionAttrNames
}
// Attr implements starlark.HasAttrs, exposing the session's fields to
// Starlark scripts. An unknown name returns (nil, nil), which Starlark
// reports as a missing attribute.
func (s *TLSSession) Attr(name string) (starlark.Value, error) {
	var value starlark.Value
	switch name {
	case "sni":
		value = starlark.String(s.SNI)
	case "server_addr":
		value = starlark.String(s.ServerAddr)
	case "user":
		value = starlark.String(s.User)
	case "client_ip":
		value = starlark.String(s.ClientIP)
	case "source_ip":
		value = starlark.String(s.SourceIP.String())
	case "acls":
		value = &s.ACLs
	case "scores":
		value = &s.Scores
	case "action":
		chosen, _ := s.currentAction()
		value = starlark.String(chosen.Action)
	case "possible_actions":
		value = stringTuple(s.PossibleActions)
	case "header":
		value = &HeaderDict{data: s.ConnectHeader}
	case "misc":
		value = &s.misc
	default:
		return nil, nil
	}
	return value, nil
}
// SetField implements starlark.HasSetField, letting Starlark scripts assign
// to the writable session fields. Assignments are rejected once the session
// has been frozen.
func (s *TLSSession) SetField(name string, val starlark.Value) error {
	if s.frozen {
		return errors.New("can't set a field of a frozen object")
	}
	switch name {
	case "sni":
		return assignStarlarkString(&s.SNI, val)
	case "server_addr":
		return assignStarlarkString(&s.ServerAddr, val)
	case "source_ip":
		var addr string
		if err := assignStarlarkString(&addr, val); err != nil {
			return err
		}
		ip := net.ParseIP(addr)
		if ip == nil {
			return fmt.Errorf("%q is not a valid IP address", addr)
		}
		s.SourceIP = ip
		return nil
	case "action":
		var action string
		if err := assignStarlarkString(&action, val); err != nil {
			return err
		}
		return s.setAction(action)
	}
	return starlark.NoSuchAttrError(fmt.Sprintf("can't assign to .%s field of TLSSession", name))
}
// errCertMismatch is returned when a redialed server presents a different
// certificate than it did on the original connection.
var errCertMismatch = errors.New("server certificate changed between original connection and redial")

// certPoolWith returns a fresh x509.CertPool containing exactly certs.
func certPoolWith(certs []*x509.Certificate) *x509.CertPool {
	pool := x509.NewCertPool()
	for _, cert := range certs {
		pool.AddCert(cert)
	}
	return pool
}
// A insertingConn is a net.Conn that inserts extra data at the start of the
// incoming data stream.
type insertingConn struct {
net.Conn
extraData []byte
}
func (c *insertingConn) Read(p []byte) (n int, err error) {
if len(c.extraData) == 0 {
return c.Conn.Read(p)
}
n = copy(p, c.extraData)
c.extraData = c.extraData[n:]
return
}
// A singleListener is a net.Listener that returns a single connection, then
// gives the error io.EOF.
type singleListener struct {
conn net.Conn
once sync.Once
}
func (s *singleListener) Accept() (net.Conn, error) {
var c net.Conn
s.once.Do(func() {
c = s.conn
})
if c != nil {
return c, nil
}
return nil, io.EOF
}
func (s *singleListener) Close() error {
s.once.Do(func() {
s.conn.Close()
})
return nil
}
func (s *singleListener) Addr() net.Addr {
return s.conn.LocalAddr()
}
// imitateCertificate returns a new TLS certificate that has most of the same
// data as serverCert but is signed by Redwood's root certificate, or
// self-signed.
func imitateCertificate(serverCert *x509.Certificate, selfSigned bool, sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// Use a hash of the real certificate (plus some other things) as the serial number.
	// (md5 is used only to derive a stable identifier here, not for security;
	// including our own cert chain makes the serial change when the root does.)
	h := md5.New()
	h.Write(serverCert.Raw)
	for _, c := range conf.TLSCert.Certificate {
		h.Write(c)
	}
	if sni != "" {
		io.WriteString(h, sni)
	}
	// Copy the identifying fields of the real certificate into the template;
	// leave the signature algorithm for the library to choose.
	template := &x509.Certificate{
		SerialNumber: big.NewInt(0).SetBytes(h.Sum(nil)),
		Subject: serverCert.Subject,
		NotBefore: serverCert.NotBefore,
		NotAfter: serverCert.NotAfter,
		KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage: serverCert.ExtKeyUsage,
		UnknownExtKeyUsage: serverCert.UnknownExtKeyUsage,
		BasicConstraintsValid: false,
		SubjectKeyId: nil,
		DNSNames: serverCert.DNSNames,
		PermittedDNSDomainsCritical: serverCert.PermittedDNSDomainsCritical,
		PermittedDNSDomains: serverCert.PermittedDNSDomains,
		SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
	}
	// If sni is not blank, make a certificate that covers only that domain,
	// instead of all the domains covered by the original certificate.
	if sni != "" {
		template.DNSNames = []string{sni}
		template.Subject.CommonName = sni
	}
	// Self-sign (used when the original certificate failed validation, so
	// the client sees an untrusted certificate too), or sign with our root.
	var newCertBytes []byte
	if selfSigned {
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, template, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	} else {
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	}
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey: conf.TLSCert.PrivateKey,
	}
	// For root-signed certificates, append our chain so clients can verify.
	if !selfSigned {
		newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	}
	return newCert, nil
}
// fakeCertificate returns a fabricated certificate for the server identified by sni.
// It is used when the real server can't be reached, so there is no real
// certificate to imitate.
func fakeCertificate(sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// Random serial number (bounded to fit comfortably in 64 bits).
	serial, err := rand.Int(rand.Reader, big.NewInt(1<<62))
	if err != nil {
		return tls.Certificate{}, err
	}
	// Valid from the start of today until the same day next month.
	y, m, d := time.Now().Date()
	template := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{CommonName: sni},
		NotBefore: time.Date(y, m, d, 0, 0, 0, 0, time.Local),
		NotAfter: time.Date(y, m+1, d, 0, 0, 0, 0, time.Local),
		KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		DNSNames: []string{sni},
		SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
	}
	// Sign with Redwood's root certificate, reusing its private key.
	newCertBytes, err := x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey: conf.TLSCert.PrivateKey,
	}
	// Append our chain so clients that trust the root can verify.
	newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	return newCert, nil
}
// validCert reports whether cert chains to a trusted root (the system roots
// or conf.ExtraRootCerts), using intermediates to complete the chain. If the
// chain is incomplete, it tries to download the missing intermediate
// certificates named in the certificates' issuing-certificate (AIA) URLs
// before giving up. Verification errors other than an unknown authority are
// treated as "valid" so that the client sees the same error we did.
//
// Fixes: response bodies were closed with defer inside the fetch loop
// (accumulating open bodies until the function returned), and the `fetched`
// dedup map was never populated, so duplicate URLs were re-fetched and a
// cycle of certificate URLs could loop forever.
func validCert(cert *x509.Certificate, intermediates []*x509.Certificate) bool {
	conf := getConfig()
	pool := certPoolWith(intermediates)
	_, err := cert.Verify(x509.VerifyOptions{Intermediates: pool})
	if err == nil {
		return true
	}
	if _, ok := err.(x509.UnknownAuthorityError); !ok {
		// There was an error, but not because the certificate wasn't signed
		// by a recognized CA. So we go ahead and use the cert and let
		// the client experience the same error.
		return true
	}
	if conf.ExtraRootCerts != nil {
		_, err = cert.Verify(x509.VerifyOptions{Roots: conf.ExtraRootCerts, Intermediates: pool})
		if err == nil {
			return true
		}
		if _, ok := err.(x509.UnknownAuthorityError); !ok {
			return true
		}
	}
	// Before we give up, we'll try fetching some intermediate certificates.
	if len(cert.IssuingCertificateURL) == 0 {
		return false
	}
	toFetch := cert.IssuingCertificateURL
	fetched := make(map[string]bool)
	for i := 0; i < len(toFetch); i++ {
		certURL := toFetch[i]
		if fetched[certURL] {
			continue
		}
		// Mark the URL as seen so duplicates (and reference cycles between
		// certificates) don't cause repeated fetches or an endless loop.
		fetched[certURL] = true
		fetchedCert, err := fetchCertData(certURL)
		if err != nil {
			continue
		}
		// The fetched certificate might be in either DER or PEM format.
		if bytes.Contains(fetchedCert, []byte("-----BEGIN CERTIFICATE-----")) {
			// It's PEM; the file may contain several certificates.
			var certDER *pem.Block
			for {
				certDER, fetchedCert = pem.Decode(fetchedCert)
				if certDER == nil {
					break
				}
				if certDER.Type != "CERTIFICATE" {
					continue
				}
				thisCert, err := x509.ParseCertificate(certDER.Bytes)
				if err != nil {
					continue
				}
				pool.AddCert(thisCert)
				toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
			}
		} else {
			// Hopefully it's DER.
			thisCert, err := x509.ParseCertificate(fetchedCert)
			if err != nil {
				continue
			}
			pool.AddCert(thisCert)
			toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
		}
	}
	// Retry verification with whatever extra intermediates we collected.
	_, err = cert.Verify(x509.VerifyOptions{Intermediates: pool})
	if err == nil {
		return true
	}
	if _, ok := err.(x509.UnknownAuthorityError); !ok {
		// There was an error, but not because the certificate wasn't signed
		// by a recognized CA. So we go ahead and use the cert and let
		// the client experience the same error.
		return true
	}
	return false
}

// fetchCertData downloads the contents of certURL (an intermediate
// certificate in DER or PEM format), closing the response body before
// returning so connections can be reused.
func fetchCertData(certURL string) ([]byte, error) {
	resp, err := http.Get(certURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("got HTTP status %s fetching %s", resp.Status, certURL)
	}
	return ioutil.ReadAll(resp.Body)
}
var ErrObsoleteSSLVersion = errors.New("obsolete SSL protocol version")
var ErrInvalidSSL = errors.New("invalid first byte for SSL connection; possibly some other protocol")
func readClientHello(conn net.Conn) (hello []byte, err error) {
conn.SetReadDeadline(time.Now().Add(10 * time.Second))
defer conn.SetReadDeadline(time.Time{})
var header [5]byte
n, err := io.ReadFull(conn, header[:])
hello = header[:n]
if err != nil {
return hello, err
}
if header[0] != 22 {
if header[0] == 128 {
return hello, ErrObsoleteSSLVersion
}
return hello, ErrInvalidSSL
}
if header[1] != 3 {
return hello, fmt.Errorf("expected major version of 3, got %d", header[1])
}
recordLen := int(header[3])<<8 | int(header[4])
if recordLen > 0x3000 {
return hello, fmt.Errorf("expected length less than 12kB, got %d", recordLen)
}
if recordLen < 4 {
return hello, fmt.Errorf("expected length of at least 4 bytes, got %d", recordLen)
}
protocolData := make([]byte, recordLen)
n, err = io.ReadFull(conn, protocolData)
hello = append(hello, protocolData[:n]...)
if err != nil {
return hello, err
}
if protocolData[0] != 1 {
return hello, fmt.Errorf("Expected message type 1 (ClientHello), got %d", protocolData[0])
}
protocolLen := int(protocolData[1])<<16 | int(protocolData[2])<<8 | int(protocolData[3])
if protocolLen != recordLen-4 {
return hello, fmt.Errorf("recordLen=%d, protocolLen=%d", recordLen, protocolLen)
}
return hello, nil
}
// parseClientHello parses some useful information out of a ClientHello message.
// It returns a ClientHelloInfo with only the following fields filled in:
// ServerName and SupportedProtocols.
//
// data is the full record as returned by readClientHello, i.e. including the
// 5-byte record header.
func parseClientHello(data []byte) (*tls.ClientHelloInfo, error) {
	// The implementation of this function is based on crypto/tls.clientHelloMsg.unmarshal
	var info tls.ClientHelloInfo
	s := cryptobyte.String(data)
	// Skip message type, length, version, and random.
	// (5-byte record header + 1 msg type + 3 msg length + 2 version + 32 random = 43.)
	if !s.Skip(43) {
		return nil, errors.New("too short")
	}
	var sessionID cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&sessionID) {
		return nil, errors.New("bad session ID")
	}
	var cipherSuites cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&cipherSuites) {
		return nil, errors.New("bad cipher suites")
	}
	var compressionMethods cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&compressionMethods) {
		return nil, errors.New("bad compression methods")
	}
	if s.Empty() {
		// no extensions
		return &info, nil
	}
	var extensions cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
		return nil, errors.New("bad extensions")
	}
	// Walk the extension list, picking out SNI (0) and ALPN (16).
	for !extensions.Empty() {
		var extension uint16
		var extData cryptobyte.String
		if !extensions.ReadUint16(&extension) || !extensions.ReadUint16LengthPrefixed(&extData) {
			return nil, errors.New("bad extension")
		}
		switch extension {
		case 0: // server name
			var nameList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
				return nil, errors.New("bad name list")
			}
			for !nameList.Empty() {
				var nameType uint8
				var serverName cryptobyte.String
				if !nameList.ReadUint8(&nameType) || !nameList.ReadUint16LengthPrefixed(&serverName) || serverName.Empty() {
					return nil, errors.New("bad entry in name list")
				}
				// Only name type 0 (host_name) is defined.
				if nameType != 0 {
					continue
				}
				if info.ServerName != "" {
					return nil, errors.New("multiple server names")
				}
				info.ServerName = string(serverName)
				// A trailing dot is forbidden, matching crypto/tls behavior.
				if strings.HasSuffix(info.ServerName, ".") {
					return nil, errors.New("server name ends with dot")
				}
			}
		case 16: // ALPN
			var protoList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
				return nil, errors.New("bad ALPN protocol list")
			}
			for !protoList.Empty() {
				var proto cryptobyte.String
				if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
					return nil, errors.New("bad ALPN protocol list entry")
				}
				info.SupportedProtos = append(info.SupportedProtos, string(proto))
			}
		default:
			// ignore
			continue
		}
		if !extData.Empty() {
			return nil, errors.New("extra data at end of extension")
		}
	}
	return &info, nil
}
// addTrustedRoots reads the PEM-encoded certificates in certPath and adds
// them to c.ExtraRootCerts, the pool of additional trusted root CAs.
func (c *config) addTrustedRoots(certPath string) error {
	if c.ExtraRootCerts == nil {
		c.ExtraRootCerts = x509.NewCertPool()
	}
	data, err := ioutil.ReadFile(certPath)
	if err != nil {
		return err
	}
	if ok := c.ExtraRootCerts.AppendCertsFromPEM(data); !ok {
		return fmt.Errorf("no certificates found in %s", certPath)
	}
	return nil
}
| {
session.PossibleActions = append(session.PossibleActions, "ssl-bump")
} | conditional_block |
tls.go | package main
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"sync"
"time"
"github.com/open-ch/ja3"
"go.starlark.net/starlark"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/net/http2"
)
// Intercept TLS (HTTPS) connections.
// loadCertificate loads the TLS certificate specified by c.CertFile and
// c.KeyFile into c.TLSCert, parses it into c.ParsedTLSCert, and registers a
// handler that serves the root certificate for download. Errors are logged
// and leave c.TLSReady false.
func (c *config) loadCertificate() {
	if c.CertFile == "" || c.KeyFile == "" {
		return
	}
	cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
	if err != nil {
		log.Println("Error loading TLS certificate:", err)
		return
	}
	c.TLSCert = cert
	parsed, err := x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		log.Println("Error parsing X509 certificate:", err)
		return
	}
	c.ParsedTLSCert = parsed
	c.TLSReady = true
	// Serve the last certificate in the chain (the root) so users can
	// download and install it in their trust store.
	c.ServeMux.HandleFunc("/cert.der", func(w http.ResponseWriter, r *http.Request) {
		tlsCert := c.TLSCert
		w.Header().Set("Content-Type", "application/x-x509-ca-cert")
		w.Write(tlsCert.Certificate[len(tlsCert.Certificate)-1])
	})
}
// connectDirect connects to serverAddr and copies data between it and conn.
// extraData is sent to the server first. It returns byte counts for the two
// directions of the tunnel.
//
// NOTE(review): the two counters appear to be swapped — the goroutine copies
// serverConn→conn (the download direction) but its count is received into
// uploaded, while the client→server copy is stored in downloaded. Callers
// currently only use the sum, so this is harmless, but confirm and fix.
func connectDirect(conn net.Conn, serverAddr string, extraData []byte, dialer *net.Dialer) (uploaded, downloaded int64) {
	activeConnections.Add(1)
	defer activeConnections.Done()
	serverConn, err := dialer.Dial("tcp", serverAddr)
	if err != nil {
		log.Printf("error with pass-through of SSL connection to %s: %s", serverAddr, err)
		conn.Close()
		return
	}
	if extraData != nil {
		// There may also be data waiting in the socket's input buffer;
		// read it before we send the data on, so that the first packet of
		// the connection doesn't get split in two.
		conn.SetReadDeadline(time.Now().Add(time.Millisecond))
		buf := make([]byte, 2000)
		n, _ := conn.Read(buf)
		conn.SetReadDeadline(time.Time{})
		if n > 0 {
			extraData = append(extraData, buf[:n]...)
		}
		// NOTE(review): a write error here is silently ignored; the io.Copy
		// calls below will fail and clean up, but the error goes unlogged.
		serverConn.Write(extraData)
	}
	ulChan := make(chan int64)
	go func() {
		n, _ := io.Copy(conn, serverConn)
		// Linger briefly so bytes in flight can drain before closing.
		time.Sleep(time.Second)
		conn.Close()
		ulChan <- n + int64(len(extraData))
	}()
	downloaded, _ = io.Copy(serverConn, conn)
	serverConn.Close()
	uploaded = <-ulChan
	return uploaded, downloaded
}
// tlsFingerprintKey is the context key under which a connection's JA3 TLS
// fingerprint is stored in the request context.
type tlsFingerprintKey struct{}
// SSLBump performs a man-in-the-middle attack on conn, to filter the HTTPS
// traffic. serverAddr is the address (host:port) of the server the client was
// trying to connect to. user is the username to use for logging; authUser is
// the authenticated user, if any; r is the CONNECT request, if any.
func SSLBump(conn net.Conn, serverAddr, user, authUser string, r *http.Request) {
	// Recover from panics so one bad connection can't crash the process.
	defer func() {
		if err := recover(); err != nil {
			buf := make([]byte, 4096)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("SSLBump: panic serving connection to %s: %v\n%s", serverAddr, err, buf)
			conn.Close()
		}
	}()
	session := &TLSSession{
		ServerAddr: serverAddr,
		User: authUser,
	}
	if r != nil {
		session.ConnectHeader = r.Header
	}
	client := conn.RemoteAddr().String()
	if host, _, err := net.SplitHostPort(client); err == nil {
		session.ClientIP = host
	} else {
		session.ClientIP = client
	}
	obsoleteVersion := false
	invalidSSL := false
	// Read the client hello so that we can find out the name of the server (not
	// just the address).
	clientHello, err := readClientHello(conn)
	if err != nil {
		logTLS(user, serverAddr, "", fmt.Errorf("error reading client hello: %v", err), false, "")
		if _, ok := err.(net.Error); ok {
			conn.Close()
			return
		} else if err == ErrObsoleteSSLVersion {
			obsoleteVersion = true
			if getConfig().BlockObsoleteSSL {
				conn.Close()
				return
			}
		} else if err == ErrInvalidSSL {
			invalidSSL = true
		} else {
			conn.Close()
			return
		}
	}
	// A parse error is deliberately ignored here; clientHelloInfo is
	// nil-checked wherever it is used below.
	clientHelloInfo, err := parseClientHello(clientHello)
	host, port, err := net.SplitHostPort(serverAddr)
	if err != nil {
		host = serverAddr
		port = "443"
	}
	serverName := ""
	if !obsoleteVersion && !invalidSSL {
		if clientHelloInfo != nil && clientHelloInfo.ServerName != "" {
			serverName = clientHelloInfo.ServerName
		}
	}
	session.SNI = serverName
	if session.ServerAddr == "" {
		session.ServerAddr = net.JoinHostPort(serverName, "443")
	}
	if serverName == "" {
		serverName = host
		if ip := net.ParseIP(serverName); ip != nil {
			// All we have is an IP address, not a name from a CONNECT request.
			// See if we can do better by reverse DNS.
			names, err := net.LookupAddr(serverName)
			if err == nil && len(names) > 0 {
				serverName = strings.TrimSuffix(names[0], ".")
			}
		}
	}
	if serverName == "" {
		logTLS(user, "", "", errors.New("no SNI available"), false, "")
		conn.Close()
		return
	}
	// Filter a virtual CONNECT request.
	cr := &http.Request{
		Method: "CONNECT",
		Header: make(http.Header),
		Host: net.JoinHostPort(serverName, port),
		URL: &url.URL{Host: serverName},
		RemoteAddr: conn.RemoteAddr().String(),
	}
	// Compute the JA3 TLS fingerprint and attach it to the request context.
	var tlsFingerprint string
	j, err := ja3.ComputeJA3FromSegment(clientHello)
	if err != nil {
		log.Printf("Error generating TLS fingerprint: %v", err)
	} else {
		tlsFingerprint = j.GetJA3Hash()
		ctx := cr.Context()
		ctx = context.WithValue(ctx, tlsFingerprintKey{}, tlsFingerprint)
		cr = cr.WithContext(ctx)
	}
	// Score the virtual CONNECT request against the URL rules and ACLs.
	var tally map[rule]int
	var scores map[string]int
	var reqACLs map[string]bool
	{
		conf := getConfig()
		tally = conf.URLRules.MatchingRules(cr.URL)
		scores = conf.categoryScores(tally)
		reqACLs = conf.ACLs.requestACLs(cr, authUser)
		if invalidSSL {
			reqACLs["invalid-ssl"] = true
		}
		if r == nil {
			// It's a transparently-intercepted request instead of a real
			// CONNECT request.
			reqACLs["transparent"] = true
		}
	}
	session.ACLs.data = reqACLs
	session.Scores.data = scores
	session.PossibleActions = []string{"allow", "block"}
	// Bumping is only offered when we have a signing certificate and the
	// handshake looked like modern, valid TLS.
	if getConfig().TLSReady && !obsoleteVersion && !invalidSSL {
		session.PossibleActions = append(session.PossibleActions, "ssl-bump")
	}
	// Give Starlark scripts a chance to inspect and modify the session.
	callStarlarkFunctions("ssl_bump", session)
	dialer := &net.Dialer{
		Timeout: 30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}
	if session.SourceIP != nil {
		dialer.LocalAddr = &net.TCPAddr{
			IP: session.SourceIP,
		}
	}
	session.chooseAction()
	logAccess(cr, nil, 0, false, user, tally, scores, session.Action, "", session.Ignored, nil)
	switch session.Action.Action {
	case "allow", "":
		// Pass the connection through unmodified.
		upload, download := connectDirect(conn, session.ServerAddr, clientHello, dialer)
		logAccess(cr, nil, upload+download, false, user, tally, scores, session.Action, "", session.Ignored, nil)
		return
	case "block":
		conn.Close()
		return
	}
	// The remaining action is "ssl-bump": intercept the connection.
	var cert tls.Certificate
	var rt http.RoundTripper
	var http2Support bool
	// closeChan is closed by the ConnState hook when the bumped connection
	// closes; we wait on it at the end of the function.
	closeChan := make(chan struct{})
	server := &http.Server{
		IdleTimeout: getConfig().CloseIdleConnections,
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateClosed:
				close(closeChan)
			}
		},
	}
	serverConnConfig := &tls.Config{
		ServerName: session.SNI,
		InsecureSkipVerify: true,
	}
	clientSupportsHTTP2 := false
	if clientHelloInfo != nil {
		for _, p := range clientHelloInfo.SupportedProtos {
			if p == "h2" {
				clientSupportsHTTP2 = true
			}
		}
	}
	if clientSupportsHTTP2 && getConfig().HTTP2Upstream {
		serverConnConfig.NextProtos = []string{"h2", "http/1.1"}
	}
	serverConn, err := tls.DialWithDialer(dialer, "tcp", session.ServerAddr, serverConnConfig)
	if err == nil {
		defer serverConn.Close()
		state := serverConn.ConnectionState()
		serverCert := state.PeerCertificates[0]
		valid := validCert(serverCert, state.PeerCertificates[1:])
		// Present the client a certificate imitating the server's
		// (self-signed if the server's certificate didn't validate, so the
		// client sees an untrusted certificate too).
		cert, err = imitateCertificate(serverCert, !valid, session.SNI)
		if err != nil {
			logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
			connectDirect(conn, session.ServerAddr, clientHello, dialer)
			return
		}
		http2Support = state.NegotiatedProtocol == "h2" && state.NegotiatedProtocolIsMutual
		// d is used to redial the server if the connection needs reopening;
		// it trusts exactly the certificates seen on the first connection.
		d := &tls.Dialer{
			NetDialer: dialer,
			Config: &tls.Config{
				ServerName: session.SNI,
				RootCAs: certPoolWith(serverConn.ConnectionState().PeerCertificates),
			},
		}
		if !valid {
			// The original chain didn't validate; on redial accept only the
			// exact certificate seen originally.
			d.Config.InsecureSkipVerify = true
			originalCert := serverConn.ConnectionState().PeerCertificates[0]
			d.Config.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
				cert, err := x509.ParseCertificate(rawCerts[0])
				if err != nil {
					return err
				}
				if cert.Equal(originalCert) {
					return nil
				}
				return errCertMismatch
			}
		}
		if http2Support {
			d.Config.NextProtos = []string{"h2"}
			// The first DialTLS call reuses serverConn; later calls redial.
			var once sync.Once
			rt = &http2.Transport{
				DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
					var c net.Conn
					once.Do(func() {
						c = serverConn
					})
					if c != nil {
						return c, nil
					}
					logVerbose("redial", "Redialing HTTP/2 connection to %s (%s)", session.SNI, session.ServerAddr)
					return d.Dial("tcp", session.ServerAddr)
				},
				TLSClientConfig: d.Config,
				StrictMaxConcurrentStreams: true,
			}
		} else {
			rt = &connTransport{
				Conn: serverConn,
				Redial: func(ctx context.Context) (net.Conn, error) {
					logVerbose("redial", "Redialing connection to %s (%s)", session.SNI, session.ServerAddr)
					return d.DialContext(ctx, "tcp", session.ServerAddr)
				},
			}
		}
	} else {
		// We couldn't reach the server; present a fabricated certificate and
		// let the regular transport report upstream errors per request.
		cert, err = fakeCertificate(session.SNI)
		if err != nil {
			logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
			conn.Close()
			return
		}
		rt = httpTransport
	}
	session.Freeze()
	server.Handler = &proxyHandler{
		TLS: true,
		tlsFingerprint: tlsFingerprint,
		connectPort: port,
		user: authUser,
		rt: rt,
		session: session,
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{cert, getConfig().TLSCert},
		PreferServerCipherSuites: true,
		CurvePreferences: []tls.CurveID{
			tls.CurveP256,
			tls.X25519, // Go 1.8 only
		},
	}
	http2Downstream := getConfig().HTTP2Downstream && http2Support
	if http2Downstream {
		tlsConfig.NextProtos = []string{"h2", "http/1.1"}
	}
	// Complete the TLS handshake with the client, replaying the ClientHello
	// bytes that were already consumed by readClientHello.
	tlsConn := tls.Server(&insertingConn{conn, clientHello}, tlsConfig)
	err = tlsConn.Handshake()
	if err != nil {
		logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error in handshake with client: %v", err), false, tlsFingerprint)
		conn.Close()
		return
	}
	logTLS(user, session.ServerAddr, serverName, nil, false, tlsFingerprint)
	if http2Downstream {
		http2.ConfigureServer(server, nil)
	}
	// Serve the single bumped connection through the filtering proxy handler.
	listener := &singleListener{conn: tlsConn}
	server.Serve(listener)
	// Wait for the connection to finish.
	<-closeChan
}
// A TLSSession is the parameter for the Starlark ssl_bump function.
type TLSSession struct {
	// SNI is the server name from the TLS ClientHello, if any.
	SNI string
	// ServerAddr is the host:port the client was trying to connect to.
	ServerAddr string
	// User is the authenticated username, if any.
	User string
	// ClientIP is the IP address of the client connection.
	ClientIP string
	// SourceIP is the IP address of the network interface to be used to dial
	// the upstream connection.
	SourceIP net.IP
	// ConnectHeader is the header from the CONNECT request, if any.
	ConnectHeader http.Header
	scoresAndACLs
	// frozen is set by Freeze; after that, fields may no longer be assigned
	// from Starlark.
	frozen bool
	// misc holds arbitrary data stored by Starlark scripts.
	misc SyncDict
}
// scoresAndACLs collects the filtering state for a request or TLS session:
// which ACLs matched, the category scores, and the action chosen.
type scoresAndACLs struct {
	ACLs StringSet
	Scores StringIntDict
	Tally map[rule]int
	PossibleActions []string
	Action ACLActionRule
	Ignored []string
}

// currentAction returns the action that currently applies, along with the
// actions that were ignored in choosing it. An action already selected
// (e.g. from Starlark) takes precedence; otherwise one is chosen from the
// ACL configuration, defaulting to "allow".
func (s *scoresAndACLs) currentAction() (ar ACLActionRule, ignored []string) {
	if s.Action.Action != "" {
		return s.Action, s.Ignored
	}
	conf := getConfig()
	ar, ignored = conf.ChooseACLCategoryAction(s.ACLs.data, s.Scores.data, conf.Threshold, s.PossibleActions...)
	if ar.Action == "" {
		ar.Action = "allow"
	}
	return ar, ignored
}

// chooseAction evaluates currentAction and stores the result.
func (s *scoresAndACLs) chooseAction() {
	s.Action, s.Ignored = s.currentAction()
}

// setAction sets the action explicitly (attributed to "starlark"), or
// returns an error if newAction is not one of PossibleActions.
func (s *scoresAndACLs) setAction(newAction string) error {
	for _, a := range s.PossibleActions {
		if newAction == a {
			s.Action = ACLActionRule{
				Action: newAction,
				Needed: []string{"starlark"},
			}
			return nil
		}
	}
	return fmt.Errorf("can't set action to %q; expected one of %q", newAction, s.PossibleActions)
}
// String implements starlark.Value.
func (s *TLSSession) String() string {
	return fmt.Sprintf("TLSSession(%q, %q)", s.SNI, s.ServerAddr)
}

// Type implements starlark.Value.
func (s *TLSSession) Type() string {
	return "TLSSession"
}

// Freeze implements starlark.Value, marking the session and its ACL and
// score collections immutable.
func (s *TLSSession) Freeze() {
	if !s.frozen {
		s.frozen = true
		s.ACLs.Freeze()
		s.Scores.Freeze()
	}
}

// Truth implements starlark.Value; a TLSSession is always truthy.
func (s *TLSSession) Truth() starlark.Bool {
	return starlark.True
}

// Hash implements starlark.Value; a TLSSession is mutable and therefore
// not hashable.
func (s *TLSSession) Hash() (uint32, error) {
	return 0, errors.New("unhashable type: TLSSession")
}
// tlsSessionAttrNames lists the attribute names exposed to Starlark via Attr.
var tlsSessionAttrNames = []string{"sni", "server_addr", "user", "client_ip", "acls", "scores", "source_ip", "action", "possible_actions", "header", "misc"}

// AttrNames implements starlark.HasAttrs.
func (s *TLSSession) AttrNames() []string {
	return tlsSessionAttrNames
}
// Attr implements starlark.HasAttrs, exposing the session's fields to
// Starlark scripts. For an unknown name it returns (nil, nil), which
// Starlark interprets as "no such attribute".
func (s *TLSSession) Attr(name string) (starlark.Value, error) {
	switch name {
	case "sni":
		return starlark.String(s.SNI), nil
	case "server_addr":
		return starlark.String(s.ServerAddr), nil
	case "user":
		return starlark.String(s.User), nil
	case "client_ip":
		return starlark.String(s.ClientIP), nil
	case "source_ip":
		return starlark.String(s.SourceIP.String()), nil
	case "acls":
		return &s.ACLs, nil
	case "scores":
		return &s.Scores, nil
	case "action":
		// Computed lazily so scripts always see the current choice.
		ar, _ := s.currentAction()
		return starlark.String(ar.Action), nil
	case "possible_actions":
		return stringTuple(s.PossibleActions), nil
	case "header":
		return &HeaderDict{data: s.ConnectHeader}, nil
	case "misc":
		return &s.misc, nil
	default:
		return nil, nil
	}
}
// SetField implements starlark.HasSetField, letting Starlark scripts assign
// to the writable fields of the session (sni, server_addr, source_ip,
// action). Assignments fail once the session has been frozen.
func (s *TLSSession) SetField(name string, val starlark.Value) error {
	if s.frozen {
		return errors.New("can't set a field of a frozen object")
	}
	switch name {
	case "sni":
		return assignStarlarkString(&s.SNI, val)
	case "server_addr":
		return assignStarlarkString(&s.ServerAddr, val)
	case "source_ip":
		// The value arrives as a string; validate it before storing.
		var ip string
		if err := assignStarlarkString(&ip, val); err != nil {
			return err
		}
		parsed := net.ParseIP(ip)
		if parsed == nil {
			return fmt.Errorf("%q is not a valid IP address", ip)
		}
		s.SourceIP = parsed
		return nil
	case "action":
		var newAction string
		if err := assignStarlarkString(&newAction, val); err != nil {
			return err
		}
		return s.setAction(newAction)
	default:
		return starlark.NoSuchAttrError(fmt.Sprintf("can't assign to .%s field of TLSSession", name))
	}
}
var errCertMismatch = errors.New("server certificate changed between original connection and redial")
// certPoolWith builds an x509.CertPool containing exactly the given
// certificates.
func certPoolWith(certs []*x509.Certificate) *x509.CertPool {
	p := x509.NewCertPool()
	for i := range certs {
		p.AddCert(certs[i])
	}
	return p
}
// An insertingConn is a net.Conn whose Read first drains extraData before
// delegating to the underlying connection.
type insertingConn struct {
	net.Conn
	extraData []byte
}

// Read returns buffered extraData until it is exhausted, then reads from the
// wrapped connection.
func (c *insertingConn) Read(p []byte) (n int, err error) {
	if len(c.extraData) > 0 {
		n = copy(p, c.extraData)
		c.extraData = c.extraData[n:]
		return n, nil
	}
	return c.Conn.Read(p)
}
// A singleListener is a net.Listener that yields its single connection from
// the first Accept call and io.EOF from every later one.
type singleListener struct {
	conn net.Conn
	once sync.Once
}

// Accept hands out the wrapped connection exactly once; afterward it
// reports io.EOF.
func (s *singleListener) Accept() (net.Conn, error) {
	var first net.Conn
	s.once.Do(func() {
		first = s.conn
	})
	if first == nil {
		return nil, io.EOF
	}
	return first, nil
}

// Close closes the wrapped connection—unless Accept has already handed it
// out, in which case the accepter owns it.
func (s *singleListener) Close() error {
	s.once.Do(func() {
		s.conn.Close()
	})
	return nil
}

// Addr reports the local address of the wrapped connection.
func (s *singleListener) Addr() net.Addr {
	return s.conn.LocalAddr()
}
// imitateCertificate returns a new TLS certificate that has most of the same
// data as serverCert but is signed by Redwood's root certificate, or
// self-signed. If sni is nonempty, the forged certificate covers only that
// name instead of all the names on the original certificate.
func imitateCertificate(serverCert *x509.Certificate, selfSigned bool, sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// Use a hash of the real certificate (plus some other things) as the serial number.
	// Including our signing certificate in the hash makes the serial change
	// when the signing cert changes. MD5 is used only to derive a stable
	// identifier, not for security.
	h := md5.New()
	h.Write(serverCert.Raw)
	for _, c := range conf.TLSCert.Certificate {
		h.Write(c)
	}
	if sni != "" {
		io.WriteString(h, sni)
	}
	template := &x509.Certificate{
		SerialNumber:                big.NewInt(0).SetBytes(h.Sum(nil)),
		Subject:                     serverCert.Subject,
		NotBefore:                   serverCert.NotBefore,
		NotAfter:                    serverCert.NotAfter,
		KeyUsage:                    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:                 serverCert.ExtKeyUsage,
		UnknownExtKeyUsage:          serverCert.UnknownExtKeyUsage,
		BasicConstraintsValid:       false,
		SubjectKeyId:                nil,
		DNSNames:                    serverCert.DNSNames,
		PermittedDNSDomainsCritical: serverCert.PermittedDNSDomainsCritical,
		PermittedDNSDomains:         serverCert.PermittedDNSDomains,
		SignatureAlgorithm:          x509.UnknownSignatureAlgorithm,
	}
	// If sni is not blank, make a certificate that covers only that domain,
	// instead of all the domains covered by the original certificate.
	if sni != "" {
		template.DNSNames = []string{sni}
		template.Subject.CommonName = sni
	}
	var newCertBytes []byte
	if selfSigned {
		// Self-sign: the template acts as its own parent.
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, template, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	} else {
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	}
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey:  conf.TLSCert.PrivateKey,
	}
	if !selfSigned {
		// Send our signing chain along so clients can verify the forgery.
		newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	}
	return newCert, nil
}
// fakeCertificate returns a fabricated certificate for the server identified
// by sni, signed by Redwood's root certificate. It is used when no real
// server certificate is available to imitate. The certificate is valid from
// the start of today until one month from today.
func fakeCertificate(sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// A random serial number; a hash-derived one isn't possible here because
	// there is no original certificate to hash.
	serial, err := rand.Int(rand.Reader, big.NewInt(1<<62))
	if err != nil {
		return tls.Certificate{}, err
	}
	y, m, d := time.Now().Date()
	template := &x509.Certificate{
		SerialNumber:       serial,
		Subject:            pkix.Name{CommonName: sni},
		NotBefore:          time.Date(y, m, d, 0, 0, 0, 0, time.Local),
		NotAfter:           time.Date(y, m+1, d, 0, 0, 0, 0, time.Local),
		KeyUsage:           x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		DNSNames:           []string{sni},
		SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
	}
	newCertBytes, err := x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey:  conf.TLSCert.PrivateKey,
	}
	// Include the signing chain so clients can verify the certificate.
	newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	return newCert, nil
}
// validCert reports whether cert (together with the given intermediate
// certificates) chains to a trusted CA—either a system root or one of the
// extra roots from the configuration. If verification fails for any reason
// other than an unknown authority, it returns true anyway, so that the
// client gets to experience the same error we did. As a last resort, it
// tries downloading missing intermediate certificates from the URLs in the
// certificates' AIA extensions.
func validCert(cert *x509.Certificate, intermediates []*x509.Certificate) bool {
	conf := getConfig()
	pool := certPoolWith(intermediates)
	_, err := cert.Verify(x509.VerifyOptions{Intermediates: pool})
	if err == nil {
		return true
	}
	if _, ok := err.(x509.UnknownAuthorityError); !ok {
		// There was an error, but not because the certificate wasn't signed
		// by a recognized CA. So we go ahead and use the cert and let
		// the client experience the same error.
		return true
	}
	if conf.ExtraRootCerts != nil {
		_, err = cert.Verify(x509.VerifyOptions{Roots: conf.ExtraRootCerts, Intermediates: pool})
		if err == nil {
			return true
		}
		if _, ok := err.(x509.UnknownAuthorityError); !ok {
			return true
		}
	}
	// Before we give up, we'll try fetching some intermediate certificates.
	if len(cert.IssuingCertificateURL) == 0 {
		return false
	}
	toFetch := cert.IssuingCertificateURL
	fetched := make(map[string]bool)
	for i := 0; i < len(toFetch); i++ {
		certURL := toFetch[i]
		if fetched[certURL] {
			continue
		}
		// Mark the URL as visited before fetching it, so that certificate
		// chains whose AIA URLs form a cycle can't make us fetch the same
		// URL repeatedly. (Previously this map was checked but never
		// written to, so it had no effect.)
		fetched[certURL] = true
		fetchedCert, ok := fetchCertData(certURL)
		if !ok {
			continue
		}
		// The fetched certificate might be in either DER or PEM format.
		if bytes.Contains(fetchedCert, []byte("-----BEGIN CERTIFICATE-----")) {
			// It's PEM: add every CERTIFICATE block in the file.
			var certDER *pem.Block
			for {
				certDER, fetchedCert = pem.Decode(fetchedCert)
				if certDER == nil {
					break
				}
				if certDER.Type != "CERTIFICATE" {
					continue
				}
				thisCert, err := x509.ParseCertificate(certDER.Bytes)
				if err != nil {
					continue
				}
				pool.AddCert(thisCert)
				toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
			}
		} else {
			// Hopefully it's DER.
			thisCert, err := x509.ParseCertificate(fetchedCert)
			if err != nil {
				continue
			}
			pool.AddCert(thisCert)
			toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
		}
	}
	_, err = cert.Verify(x509.VerifyOptions{Intermediates: pool})
	if err == nil {
		return true
	}
	if _, ok := err.(x509.UnknownAuthorityError); !ok {
		// There was an error, but not because the certificate wasn't signed
		// by a recognized CA. So we go ahead and use the cert and let
		// the client experience the same error.
		return true
	}
	return false
}

// fetchCertData downloads the certificate at certURL and returns its raw
// bytes. The response body is closed before returning (instead of being
// deferred to the end of validCert, which let bodies accumulate while the
// fetch loop was still running).
func fetchCertData(certURL string) ([]byte, bool) {
	resp, err := http.Get(certURL)
	if err != nil {
		return nil, false
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, false
	}
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, false
	}
	return data, true
}
var ErrObsoleteSSLVersion = errors.New("obsolete SSL protocol version")
var ErrInvalidSSL = errors.New("invalid first byte for SSL connection; possibly some other protocol")
// readClientHello reads a TLS ClientHello message from conn, returning the
// raw bytes read (record header included) even when it also returns an
// error, so that the caller can replay them to the origin server. It applies
// a 10-second read deadline, and distinguishes two special failure modes:
// ErrObsoleteSSLVersion (SSLv2-style record) and ErrInvalidSSL (the first
// byte is not a TLS handshake record).
func readClientHello(conn net.Conn) (hello []byte, err error) {
	conn.SetReadDeadline(time.Now().Add(10 * time.Second))
	defer conn.SetReadDeadline(time.Time{})
	// Read the 5-byte TLS record header: type, major, minor, length (2 bytes).
	var header [5]byte
	n, err := io.ReadFull(conn, header[:])
	hello = header[:n]
	if err != nil {
		return hello, err
	}
	if header[0] != 22 { // 22 = handshake record type
		if header[0] == 128 {
			return hello, ErrObsoleteSSLVersion
		}
		return hello, ErrInvalidSSL
	}
	if header[1] != 3 {
		return hello, fmt.Errorf("expected major version of 3, got %d", header[1])
	}
	recordLen := int(header[3])<<8 | int(header[4])
	if recordLen > 0x3000 {
		return hello, fmt.Errorf("expected length less than 12kB, got %d", recordLen)
	}
	if recordLen < 4 {
		return hello, fmt.Errorf("expected length of at least 4 bytes, got %d", recordLen)
	}
	protocolData := make([]byte, recordLen)
	n, err = io.ReadFull(conn, protocolData)
	hello = append(hello, protocolData[:n]...)
	if err != nil {
		return hello, err
	}
	if protocolData[0] != 1 { // 1 = ClientHello handshake message type
		return hello, fmt.Errorf("expected message type 1 (ClientHello), got %d", protocolData[0])
	}
	// The handshake message's own 24-bit length must fill the record exactly.
	protocolLen := int(protocolData[1])<<16 | int(protocolData[2])<<8 | int(protocolData[3])
	if protocolLen != recordLen-4 {
		return hello, fmt.Errorf("recordLen=%d, protocolLen=%d", recordLen, protocolLen)
	}
	return hello, nil
}
// parseClientHello parses some useful information out of a ClientHello message.
// It returns a ClientHelloInfo with only the following fields filled in:
// ServerName and SupportedProtos.
func parseClientHello(data []byte) (*tls.ClientHelloInfo, error) {
	// The implementation of this function is based on crypto/tls.clientHelloMsg.unmarshal
	var info tls.ClientHelloInfo
	s := cryptobyte.String(data)
	// Skip message type, length, version, and random.
	// (1 + 3 + 2 + 32 + 5 record-header bytes = 43, since data includes the
	// record header produced by readClientHello.)
	if !s.Skip(43) {
		return nil, errors.New("too short")
	}
	var sessionID cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&sessionID) {
		return nil, errors.New("bad session ID")
	}
	var cipherSuites cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&cipherSuites) {
		return nil, errors.New("bad cipher suites")
	}
	var compressionMethods cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&compressionMethods) {
		return nil, errors.New("bad compression methods")
	}
	if s.Empty() {
		// no extensions
		return &info, nil
	}
	var extensions cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
		return nil, errors.New("bad extensions")
	}
	// Walk the extension list; we care only about server_name (0) and
	// ALPN (16), but every extension's framing is still validated.
	for !extensions.Empty() {
		var extension uint16
		var extData cryptobyte.String
		if !extensions.ReadUint16(&extension) || !extensions.ReadUint16LengthPrefixed(&extData) {
			return nil, errors.New("bad extension")
		}
		switch extension {
		case 0: // server name
			var nameList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
				return nil, errors.New("bad name list")
			}
			for !nameList.Empty() {
				var nameType uint8
				var serverName cryptobyte.String
				if !nameList.ReadUint8(&nameType) || !nameList.ReadUint16LengthPrefixed(&serverName) || serverName.Empty() {
					return nil, errors.New("bad entry in name list")
				}
				// Only name type 0 (host_name) is defined; skip others.
				if nameType != 0 {
					continue
				}
				if info.ServerName != "" {
					return nil, errors.New("multiple server names")
				}
				info.ServerName = string(serverName)
				if strings.HasSuffix(info.ServerName, ".") {
					return nil, errors.New("server name ends with dot")
				}
			}
		case 16: // ALPN
			var protoList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
				return nil, errors.New("bad ALPN protocol list")
			}
			for !protoList.Empty() {
				var proto cryptobyte.String
				if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
					return nil, errors.New("bad ALPN protocol list entry")
				}
				info.SupportedProtos = append(info.SupportedProtos, string(proto))
			}
		default:
			// ignore (and skip the extData.Empty() check below, since we
			// didn't consume this extension's data)
			continue
		}
		if !extData.Empty() {
			return nil, errors.New("extra data at end of extension")
		}
	}
	return &info, nil
}
func (c *config) | (certPath string) error {
if c.ExtraRootCerts == nil {
c.ExtraRootCerts = x509.NewCertPool()
}
pem, err := ioutil.ReadFile(certPath)
if err != nil {
return err
}
if !c.ExtraRootCerts.AppendCertsFromPEM(pem) {
return fmt.Errorf("no certificates found in %s", certPath)
}
return nil
}
| addTrustedRoots | identifier_name |
tls.go | package main
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"sync"
"time"
"github.com/open-ch/ja3"
"go.starlark.net/starlark"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/net/http2"
)
// Intercept TLS (HTTPS) connections.
// loadCertificate loads the TLS certificate specified by certFile and keyFile
// into tlsCert. On success it marks the configuration TLSReady and registers
// a /cert.der handler that serves the root certificate for users to install.
// Errors are logged rather than returned, leaving TLSReady false.
func (c *config) loadCertificate() {
	if c.CertFile != "" && c.KeyFile != "" {
		cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
		if err != nil {
			log.Println("Error loading TLS certificate:", err)
			return
		}
		c.TLSCert = cert
		parsed, err := x509.ParseCertificate(cert.Certificate[0])
		if err != nil {
			log.Println("Error parsing X509 certificate:", err)
			return
		}
		c.ParsedTLSCert = parsed
		c.TLSReady = true
		c.ServeMux.HandleFunc("/cert.der", func(w http.ResponseWriter, r *http.Request) {
			tlsCert := c.TLSCert
			w.Header().Set("Content-Type", "application/x-x509-ca-cert")
			// Serve the last certificate in the chain—the root.
			w.Write(tlsCert.Certificate[len(tlsCert.Certificate)-1])
		})
	}
}
// connectDirect connects to serverAddr and copies data between it and conn.
// extraData is sent to the server first. It returns the number of bytes
// copied in each direction (uploaded includes extraData), and closes both
// connections before returning.
func connectDirect(conn net.Conn, serverAddr string, extraData []byte, dialer *net.Dialer) (uploaded, downloaded int64) {
	activeConnections.Add(1)
	defer activeConnections.Done()
	serverConn, err := dialer.Dial("tcp", serverAddr)
	if err != nil {
		log.Printf("error with pass-through of SSL connection to %s: %s", serverAddr, err)
		conn.Close()
		return
	}
	if extraData != nil {
		// There may also be data waiting in the socket's input buffer;
		// read it before we send the data on, so that the first packet of
		// the connection doesn't get split in two.
		conn.SetReadDeadline(time.Now().Add(time.Millisecond))
		buf := make([]byte, 2000)
		n, _ := conn.Read(buf)
		conn.SetReadDeadline(time.Time{})
		if n > 0 {
			extraData = append(extraData, buf[:n]...)
		}
		serverConn.Write(extraData)
	}
	ulChan := make(chan int64)
	go func() {
		// Server-to-client direction; the brief sleep gives any final
		// client-to-server data a chance to flush before closing.
		n, _ := io.Copy(conn, serverConn)
		time.Sleep(time.Second)
		conn.Close()
		ulChan <- n + int64(len(extraData))
	}()
	downloaded, _ = io.Copy(serverConn, conn)
	serverConn.Close()
	// Wait for the copy goroutine, which also prevents a goroutine leak.
	uploaded = <-ulChan
	return uploaded, downloaded
}
type tlsFingerprintKey struct{}
// SSLBump performs a man-in-the-middle attack on conn, to filter the HTTPS
// traffic. serverAddr is the address (host:port) of the server the client was
// trying to connect to. user is the username to use for logging; authUser is
// the authenticated user, if any; r is the CONNECT request, if any.
func SSLBump(conn net.Conn, serverAddr, user, authUser string, r *http.Request) {
defer func() {
if err := recover(); err != nil {
buf := make([]byte, 4096)
buf = buf[:runtime.Stack(buf, false)]
log.Printf("SSLBump: panic serving connection to %s: %v\n%s", serverAddr, err, buf)
conn.Close()
}
}()
session := &TLSSession{
ServerAddr: serverAddr,
User: authUser,
}
if r != nil {
session.ConnectHeader = r.Header
}
	if client, _, err := net.SplitHostPort(conn.RemoteAddr().String()); err == nil {
		session.ClientIP = client
	}
}
obsoleteVersion := false
invalidSSL := false
// Read the client hello so that we can find out the name of the server (not
// just the address).
clientHello, err := readClientHello(conn)
if err != nil {
logTLS(user, serverAddr, "", fmt.Errorf("error reading client hello: %v", err), false, "")
if _, ok := err.(net.Error); ok {
conn.Close()
return
} else if err == ErrObsoleteSSLVersion {
obsoleteVersion = true
if getConfig().BlockObsoleteSSL {
conn.Close()
return
}
} else if err == ErrInvalidSSL {
invalidSSL = true
} else {
conn.Close()
return
}
}
clientHelloInfo, err := parseClientHello(clientHello)
host, port, err := net.SplitHostPort(serverAddr)
if err != nil {
host = serverAddr
port = "443"
}
serverName := ""
if !obsoleteVersion && !invalidSSL {
if clientHelloInfo != nil && clientHelloInfo.ServerName != "" {
serverName = clientHelloInfo.ServerName
}
}
session.SNI = serverName
if session.ServerAddr == "" {
session.ServerAddr = net.JoinHostPort(serverName, "443")
}
if serverName == "" {
serverName = host
if ip := net.ParseIP(serverName); ip != nil {
// All we have is an IP address, not a name from a CONNECT request.
// See if we can do better by reverse DNS.
names, err := net.LookupAddr(serverName)
if err == nil && len(names) > 0 {
serverName = strings.TrimSuffix(names[0], ".")
}
}
}
if serverName == "" {
logTLS(user, "", "", errors.New("no SNI available"), false, "")
conn.Close()
return
}
// Filter a virtual CONNECT request.
cr := &http.Request{
Method: "CONNECT",
Header: make(http.Header),
Host: net.JoinHostPort(serverName, port),
URL: &url.URL{Host: serverName},
RemoteAddr: conn.RemoteAddr().String(),
}
var tlsFingerprint string
j, err := ja3.ComputeJA3FromSegment(clientHello)
if err != nil {
log.Printf("Error generating TLS fingerprint: %v", err)
} else {
tlsFingerprint = j.GetJA3Hash()
ctx := cr.Context()
ctx = context.WithValue(ctx, tlsFingerprintKey{}, tlsFingerprint)
cr = cr.WithContext(ctx)
}
var tally map[rule]int
var scores map[string]int
var reqACLs map[string]bool
{
conf := getConfig()
tally = conf.URLRules.MatchingRules(cr.URL)
scores = conf.categoryScores(tally)
reqACLs = conf.ACLs.requestACLs(cr, authUser)
if invalidSSL {
reqACLs["invalid-ssl"] = true
}
if r == nil {
// It's a transparently-intercepted request instead of a real
// CONNECT request.
reqACLs["transparent"] = true
}
}
session.ACLs.data = reqACLs
session.Scores.data = scores
session.PossibleActions = []string{"allow", "block"}
if getConfig().TLSReady && !obsoleteVersion && !invalidSSL {
session.PossibleActions = append(session.PossibleActions, "ssl-bump")
}
callStarlarkFunctions("ssl_bump", session)
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
if session.SourceIP != nil {
dialer.LocalAddr = &net.TCPAddr{
IP: session.SourceIP,
}
}
session.chooseAction()
logAccess(cr, nil, 0, false, user, tally, scores, session.Action, "", session.Ignored, nil)
switch session.Action.Action {
case "allow", "":
upload, download := connectDirect(conn, session.ServerAddr, clientHello, dialer)
logAccess(cr, nil, upload+download, false, user, tally, scores, session.Action, "", session.Ignored, nil)
return
case "block":
conn.Close()
return
}
var cert tls.Certificate
var rt http.RoundTripper
var http2Support bool
closeChan := make(chan struct{})
server := &http.Server{
IdleTimeout: getConfig().CloseIdleConnections,
ConnState: func(conn net.Conn, state http.ConnState) {
switch state {
case http.StateClosed:
close(closeChan)
}
},
}
serverConnConfig := &tls.Config{
ServerName: session.SNI,
InsecureSkipVerify: true,
}
clientSupportsHTTP2 := false
if clientHelloInfo != nil {
for _, p := range clientHelloInfo.SupportedProtos {
if p == "h2" {
clientSupportsHTTP2 = true
}
}
}
if clientSupportsHTTP2 && getConfig().HTTP2Upstream {
serverConnConfig.NextProtos = []string{"h2", "http/1.1"}
}
serverConn, err := tls.DialWithDialer(dialer, "tcp", session.ServerAddr, serverConnConfig)
if err == nil {
defer serverConn.Close()
state := serverConn.ConnectionState()
serverCert := state.PeerCertificates[0]
valid := validCert(serverCert, state.PeerCertificates[1:])
cert, err = imitateCertificate(serverCert, !valid, session.SNI)
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
connectDirect(conn, session.ServerAddr, clientHello, dialer)
return
}
http2Support = state.NegotiatedProtocol == "h2" && state.NegotiatedProtocolIsMutual
d := &tls.Dialer{
NetDialer: dialer,
Config: &tls.Config{
ServerName: session.SNI,
RootCAs: certPoolWith(serverConn.ConnectionState().PeerCertificates),
},
}
if !valid {
d.Config.InsecureSkipVerify = true
originalCert := serverConn.ConnectionState().PeerCertificates[0]
d.Config.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
cert, err := x509.ParseCertificate(rawCerts[0])
if err != nil {
return err
}
if cert.Equal(originalCert) {
return nil
}
return errCertMismatch
}
}
if http2Support {
d.Config.NextProtos = []string{"h2"}
var once sync.Once
rt = &http2.Transport{
DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
var c net.Conn
once.Do(func() {
c = serverConn
})
if c != nil {
return c, nil
}
logVerbose("redial", "Redialing HTTP/2 connection to %s (%s)", session.SNI, session.ServerAddr)
return d.Dial("tcp", session.ServerAddr)
},
TLSClientConfig: d.Config,
StrictMaxConcurrentStreams: true,
}
} else {
rt = &connTransport{
Conn: serverConn,
Redial: func(ctx context.Context) (net.Conn, error) {
logVerbose("redial", "Redialing connection to %s (%s)", session.SNI, session.ServerAddr)
return d.DialContext(ctx, "tcp", session.ServerAddr)
},
}
}
} else {
cert, err = fakeCertificate(session.SNI)
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
conn.Close()
return
}
rt = httpTransport
}
session.Freeze()
server.Handler = &proxyHandler{
TLS: true,
tlsFingerprint: tlsFingerprint,
connectPort: port,
user: authUser,
rt: rt,
session: session,
}
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{cert, getConfig().TLSCert},
PreferServerCipherSuites: true,
CurvePreferences: []tls.CurveID{
tls.CurveP256,
tls.X25519, // Go 1.8 only
},
}
http2Downstream := getConfig().HTTP2Downstream && http2Support
if http2Downstream {
tlsConfig.NextProtos = []string{"h2", "http/1.1"}
}
tlsConn := tls.Server(&insertingConn{conn, clientHello}, tlsConfig)
err = tlsConn.Handshake()
if err != nil {
logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error in handshake with client: %v", err), false, tlsFingerprint)
conn.Close()
return
}
logTLS(user, session.ServerAddr, serverName, nil, false, tlsFingerprint)
if http2Downstream {
http2.ConfigureServer(server, nil)
}
listener := &singleListener{conn: tlsConn}
server.Serve(listener)
// Wait for the connection to finish.
<-closeChan
}
// A TLSSession is the parameter for the Starlark ssl_bump function.
type TLSSession struct {
SNI string
ServerAddr string
User string
ClientIP string
// SourceIP is the IP address of the network interface to be used fo dial
// the upstream connection.
SourceIP net.IP
// ConnectHeader is the header from the CONNECT request, if any.
ConnectHeader http.Header
scoresAndACLs
frozen bool
misc SyncDict
}
type scoresAndACLs struct {
ACLs StringSet
Scores StringIntDict
Tally map[rule]int
PossibleActions []string
Action ACLActionRule
Ignored []string
}
func (s *scoresAndACLs) currentAction() (ar ACLActionRule, ignored []string) {
if s.Action.Action != "" {
return s.Action, s.Ignored
}
conf := getConfig()
ar, ignored = conf.ChooseACLCategoryAction(s.ACLs.data, s.Scores.data, conf.Threshold, s.PossibleActions...)
if ar.Action == "" {
ar.Action = "allow"
}
return ar, ignored
}
func (s *scoresAndACLs) chooseAction() {
s.Action, s.Ignored = s.currentAction()
}
func (s *scoresAndACLs) setAction(newAction string) error {
for _, a := range s.PossibleActions {
if newAction == a {
s.Action = ACLActionRule{
Action: newAction,
Needed: []string{"starlark"},
}
return nil
}
}
return fmt.Errorf("can't set action to %q; expected one of %q", newAction, s.PossibleActions)
}
func (s *TLSSession) String() string {
return fmt.Sprintf("TLSSession(%q, %q)", s.SNI, s.ServerAddr)
}
func (s *TLSSession) Type() string {
return "TLSSession"
}
func (s *TLSSession) Freeze() {
if !s.frozen {
s.frozen = true
s.ACLs.Freeze()
s.Scores.Freeze()
}
}
func (s *TLSSession) Truth() starlark.Bool {
return starlark.True
}
func (s *TLSSession) Hash() (uint32, error) {
return 0, errors.New("unhashable type: TLSSession")
}
var tlsSessionAttrNames = []string{"sni", "server_addr", "user", "client_ip", "acls", "scores", "source_ip", "action", "possible_actions", "header", "misc"}
func (s *TLSSession) AttrNames() []string {
return tlsSessionAttrNames
}
func (s *TLSSession) Attr(name string) (starlark.Value, error) {
switch name {
case "sni":
return starlark.String(s.SNI), nil
case "server_addr":
return starlark.String(s.ServerAddr), nil
case "user":
return starlark.String(s.User), nil
case "client_ip":
return starlark.String(s.ClientIP), nil
case "source_ip":
return starlark.String(s.SourceIP.String()), nil
case "acls":
return &s.ACLs, nil
case "scores":
return &s.Scores, nil
case "action":
ar, _ := s.currentAction()
return starlark.String(ar.Action), nil
case "possible_actions":
return stringTuple(s.PossibleActions), nil
case "header":
return &HeaderDict{data: s.ConnectHeader}, nil
case "misc":
return &s.misc, nil
default:
return nil, nil
}
}
func (s *TLSSession) SetField(name string, val starlark.Value) error {
if s.frozen {
return errors.New("can't set a field of a frozen object")
}
switch name {
case "sni":
return assignStarlarkString(&s.SNI, val)
case "server_addr":
return assignStarlarkString(&s.ServerAddr, val)
case "source_ip":
var ip string
if err := assignStarlarkString(&ip, val); err != nil {
return err
}
parsed := net.ParseIP(ip)
if parsed == nil {
return fmt.Errorf("%q is not a valid IP address", ip)
}
s.SourceIP = parsed
return nil
case "action":
var newAction string
if err := assignStarlarkString(&newAction, val); err != nil {
return err
}
return s.setAction(newAction)
default:
return starlark.NoSuchAttrError(fmt.Sprintf("can't assign to .%s field of TLSSession", name))
}
}
var errCertMismatch = errors.New("server certificate changed between original connection and redial")
func certPoolWith(certs []*x509.Certificate) *x509.CertPool {
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool
}
// A insertingConn is a net.Conn that inserts extra data at the start of the
// incoming data stream.
type insertingConn struct {
net.Conn
extraData []byte
}
func (c *insertingConn) Read(p []byte) (n int, err error) {
if len(c.extraData) == 0 {
return c.Conn.Read(p)
}
n = copy(p, c.extraData)
c.extraData = c.extraData[n:]
return
}
// A singleListener is a net.Listener that returns a single connection, then
// gives the error io.EOF.
type singleListener struct {
conn net.Conn
once sync.Once
}
func (s *singleListener) Accept() (net.Conn, error) {
var c net.Conn
s.once.Do(func() {
c = s.conn
})
if c != nil {
return c, nil
}
return nil, io.EOF
}
func (s *singleListener) Close() error {
s.once.Do(func() {
s.conn.Close()
})
return nil
}
func (s *singleListener) Addr() net.Addr {
return s.conn.LocalAddr()
}
// imitateCertificate returns a new TLS certificate that has most of the same
// data as serverCert but is signed by Redwood's root certificate, or
// self-signed.
func imitateCertificate(serverCert *x509.Certificate, selfSigned bool, sni string) (cert tls.Certificate, err error) {
conf := getConfig()
// Use a hash of the real certificate (plus some other things) as the serial number.
h := md5.New()
h.Write(serverCert.Raw)
for _, c := range conf.TLSCert.Certificate {
h.Write(c)
}
if sni != "" {
io.WriteString(h, sni)
}
template := &x509.Certificate{
SerialNumber: big.NewInt(0).SetBytes(h.Sum(nil)),
Subject: serverCert.Subject,
NotBefore: serverCert.NotBefore,
NotAfter: serverCert.NotAfter,
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
ExtKeyUsage: serverCert.ExtKeyUsage,
UnknownExtKeyUsage: serverCert.UnknownExtKeyUsage,
BasicConstraintsValid: false,
SubjectKeyId: nil,
DNSNames: serverCert.DNSNames,
PermittedDNSDomainsCritical: serverCert.PermittedDNSDomainsCritical,
PermittedDNSDomains: serverCert.PermittedDNSDomains,
SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
}
// If sni is not blank, make a certificate that covers only that domain,
// instead of all the domains covered by the original certificate.
if sni != "" {
template.DNSNames = []string{sni}
template.Subject.CommonName = sni
}
var newCertBytes []byte
if selfSigned {
newCertBytes, err = x509.CreateCertificate(rand.Reader, template, template, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
} else {
newCertBytes, err = x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
}
if err != nil {
return tls.Certificate{}, err
}
newCert := tls.Certificate{
Certificate: [][]byte{newCertBytes},
PrivateKey: conf.TLSCert.PrivateKey,
}
if !selfSigned {
newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
}
return newCert, nil
}
// fakeCertificate returns a fabricated certificate for the server identified by sni.
func fakeCertificate(sni string) (cert tls.Certificate, err error) {
conf := getConfig()
serial, err := rand.Int(rand.Reader, big.NewInt(1<<62))
if err != nil {
return tls.Certificate{}, err
}
y, m, d := time.Now().Date()
template := &x509.Certificate{
SerialNumber: serial,
Subject: pkix.Name{CommonName: sni},
NotBefore: time.Date(y, m, d, 0, 0, 0, 0, time.Local),
NotAfter: time.Date(y, m+1, d, 0, 0, 0, 0, time.Local),
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
DNSNames: []string{sni},
SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
}
newCertBytes, err := x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
if err != nil {
return tls.Certificate{}, err
}
newCert := tls.Certificate{
Certificate: [][]byte{newCertBytes},
PrivateKey: conf.TLSCert.PrivateKey,
}
newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
return newCert, nil
}
func validCert(cert *x509.Certificate, intermediates []*x509.Certificate) bool {
conf := getConfig()
pool := certPoolWith(intermediates)
_, err := cert.Verify(x509.VerifyOptions{Intermediates: pool})
if err == nil {
return true
}
if _, ok := err.(x509.UnknownAuthorityError); !ok {
// There was an error, but not because the certificate wasn't signed
// by a recognized CA. So we go ahead and use the cert and let
// the client experience the same error.
return true
}
if conf.ExtraRootCerts != nil {
_, err = cert.Verify(x509.VerifyOptions{Roots: conf.ExtraRootCerts, Intermediates: pool})
if err == nil {
return true
}
if _, ok := err.(x509.UnknownAuthorityError); !ok {
return true
}
}
// Before we give up, we'll try fetching some intermediate certificates.
if len(cert.IssuingCertificateURL) == 0 {
return false
}
toFetch := cert.IssuingCertificateURL
fetched := make(map[string]bool)
for i := 0; i < len(toFetch); i++ {
certURL := toFetch[i]
if fetched[certURL] {
continue
}
resp, err := http.Get(certURL)
if err == nil {
defer resp.Body.Close()
}
if err != nil || resp.StatusCode != 200 {
continue
}
fetchedCert, err := ioutil.ReadAll(resp.Body)
if err != nil {
continue
}
// The fetched certificate might be in either DER or PEM format.
if bytes.Contains(fetchedCert, []byte("-----BEGIN CERTIFICATE-----")) {
// It's PEM.
var certDER *pem.Block
for {
certDER, fetchedCert = pem.Decode(fetchedCert)
if certDER == nil {
break
}
if certDER.Type != "CERTIFICATE" {
continue
}
thisCert, err := x509.ParseCertificate(certDER.Bytes)
if err != nil {
continue
}
pool.AddCert(thisCert)
toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
}
} else {
// Hopefully it's DER.
thisCert, err := x509.ParseCertificate(fetchedCert)
if err != nil {
continue
}
pool.AddCert(thisCert)
toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
}
}
_, err = cert.Verify(x509.VerifyOptions{Intermediates: pool})
if err == nil {
return true
}
if _, ok := err.(x509.UnknownAuthorityError); !ok {
// There was an error, but not because the certificate wasn't signed
// by a recognized CA. So we go ahead and use the cert and let
// the client experience the same error.
return true
}
return false
}
var ErrObsoleteSSLVersion = errors.New("obsolete SSL protocol version")
var ErrInvalidSSL = errors.New("invalid first byte for SSL connection; possibly some other protocol")
func readClientHello(conn net.Conn) (hello []byte, err error) {
conn.SetReadDeadline(time.Now().Add(10 * time.Second))
defer conn.SetReadDeadline(time.Time{})
var header [5]byte
n, err := io.ReadFull(conn, header[:])
hello = header[:n]
if err != nil {
return hello, err
}
if header[0] != 22 {
if header[0] == 128 {
return hello, ErrObsoleteSSLVersion
}
return hello, ErrInvalidSSL
}
if header[1] != 3 {
return hello, fmt.Errorf("expected major version of 3, got %d", header[1])
}
recordLen := int(header[3])<<8 | int(header[4])
if recordLen > 0x3000 {
return hello, fmt.Errorf("expected length less than 12kB, got %d", recordLen)
}
if recordLen < 4 {
return hello, fmt.Errorf("expected length of at least 4 bytes, got %d", recordLen)
}
protocolData := make([]byte, recordLen)
n, err = io.ReadFull(conn, protocolData)
hello = append(hello, protocolData[:n]...)
if err != nil {
return hello, err
}
if protocolData[0] != 1 {
return hello, fmt.Errorf("Expected message type 1 (ClientHello), got %d", protocolData[0])
}
protocolLen := int(protocolData[1])<<16 | int(protocolData[2])<<8 | int(protocolData[3])
if protocolLen != recordLen-4 {
return hello, fmt.Errorf("recordLen=%d, protocolLen=%d", recordLen, protocolLen)
}
return hello, nil
}
// parseClientHello parses some useful information out of a ClientHello message.
// It returns a ClientHelloInfo with only the following fields filled in:
// ServerName and SupportedProtocols.
func parseClientHello(data []byte) (*tls.ClientHelloInfo, error) {
// The implementation of this function is based on crypto/tls.clientHelloMsg.unmarshal
var info tls.ClientHelloInfo
s := cryptobyte.String(data)
// Skip message type, length, version, and random.
if !s.Skip(43) {
return nil, errors.New("too short")
}
var sessionID cryptobyte.String
if !s.ReadUint8LengthPrefixed(&sessionID) {
return nil, errors.New("bad session ID")
}
var cipherSuites cryptobyte.String
if !s.ReadUint16LengthPrefixed(&cipherSuites) {
return nil, errors.New("bad cipher suites")
}
var compressionMethods cryptobyte.String
if !s.ReadUint8LengthPrefixed(&compressionMethods) {
return nil, errors.New("bad compression methods")
}
if s.Empty() {
// no extensions
return &info, nil
}
var extensions cryptobyte.String
if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
return nil, errors.New("bad extensions")
}
for !extensions.Empty() {
var extension uint16
var extData cryptobyte.String
if !extensions.ReadUint16(&extension) || !extensions.ReadUint16LengthPrefixed(&extData) {
return nil, errors.New("bad extension")
}
switch extension {
case 0: // server name
var nameList cryptobyte.String
if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
return nil, errors.New("bad name list")
}
for !nameList.Empty() {
var nameType uint8
var serverName cryptobyte.String
if !nameList.ReadUint8(&nameType) || !nameList.ReadUint16LengthPrefixed(&serverName) || serverName.Empty() {
return nil, errors.New("bad entry in name list")
}
if nameType != 0 {
continue
}
if info.ServerName != "" {
return nil, errors.New("multiple server names")
}
info.ServerName = string(serverName)
if strings.HasSuffix(info.ServerName, ".") {
return nil, errors.New("server name ends with dot")
}
}
case 16: // ALPN
var protoList cryptobyte.String
if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
return nil, errors.New("bad ALPN protocol list")
}
for !protoList.Empty() {
var proto cryptobyte.String
if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
return nil, errors.New("bad ALPN protocol list entry")
}
info.SupportedProtos = append(info.SupportedProtos, string(proto))
}
default:
// ignore
continue
}
if !extData.Empty() {
return nil, errors.New("extra data at end of extension")
}
}
return &info, nil
}
func (c *config) addTrustedRoots(certPath string) error {
if c.ExtraRootCerts == nil {
c.ExtraRootCerts = x509.NewCertPool()
}
pem, err := ioutil.ReadFile(certPath)
if err != nil {
return err
}
if !c.ExtraRootCerts.AppendCertsFromPEM(pem) {
return fmt.Errorf("no certificates found in %s", certPath)
}
return nil
} | client := conn.RemoteAddr().String()
if host, _, err := net.SplitHostPort(client); err == nil {
session.ClientIP = host
} else { | random_line_split |
tls.go | package main
import (
"bytes"
"context"
"crypto/md5"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/big"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"sync"
"time"
"github.com/open-ch/ja3"
"go.starlark.net/starlark"
"golang.org/x/crypto/cryptobyte"
"golang.org/x/net/http2"
)
// Intercept TLS (HTTPS) connections.
// loadCertificate reads the TLS certificate named by c.CertFile and
// c.KeyFile into the config, and registers an HTTP handler that serves the
// root certificate (the last element of the chain) in DER form at /cert.der.
// If either path is empty, or loading/parsing fails, TLSReady stays false.
func (c *config) loadCertificate() {
	if c.CertFile == "" || c.KeyFile == "" {
		return
	}
	cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile)
	if err != nil {
		log.Println("Error loading TLS certificate:", err)
		return
	}
	c.TLSCert = cert
	parsed, err := x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		log.Println("Error parsing X509 certificate:", err)
		return
	}
	c.ParsedTLSCert = parsed
	c.TLSReady = true
	c.ServeMux.HandleFunc("/cert.der", func(w http.ResponseWriter, r *http.Request) {
		chain := c.TLSCert
		w.Header().Set("Content-Type", "application/x-x509-ca-cert")
		w.Write(chain.Certificate[len(chain.Certificate)-1])
	})
}
// connectDirect connects to serverAddr and copies data between it and conn.
// extraData is sent to the server first. It returns the number of bytes
// uploaded (client to server, including extraData) and downloaded (server to
// client).
func connectDirect(conn net.Conn, serverAddr string, extraData []byte, dialer *net.Dialer) (uploaded, downloaded int64) {
	activeConnections.Add(1)
	defer activeConnections.Done()
	serverConn, err := dialer.Dial("tcp", serverAddr)
	if err != nil {
		log.Printf("error with pass-through of SSL connection to %s: %s", serverAddr, err)
		conn.Close()
		return
	}
	if extraData != nil {
		// There may also be data waiting in the socket's input buffer;
		// read it before we send the data on, so that the first packet of
		// the connection doesn't get split in two.
		conn.SetReadDeadline(time.Now().Add(time.Millisecond))
		buf := make([]byte, 2000)
		n, _ := conn.Read(buf)
		conn.SetReadDeadline(time.Time{})
		if n > 0 {
			extraData = append(extraData, buf[:n]...)
		}
		serverConn.Write(extraData)
	}
	dlChan := make(chan int64)
	go func() {
		// Server-to-client traffic counts as download.
		// (The original code fed this count into the "uploaded" result, and
		// also added extraData — which was sent TO the server — to it; the
		// two counters were swapped.)
		n, _ := io.Copy(conn, serverConn)
		time.Sleep(time.Second)
		conn.Close()
		dlChan <- n
	}()
	// Client-to-server traffic counts as upload, as does extraData, which was
	// already written to the server above.
	uploaded, _ = io.Copy(serverConn, conn)
	uploaded += int64(len(extraData))
	serverConn.Close()
	downloaded = <-dlChan
	return uploaded, downloaded
}
type tlsFingerprintKey struct{}
// SSLBump performs a man-in-the-middle attack on conn, to filter the HTTPS
// traffic. serverAddr is the address (host:port) of the server the client was
// trying to connect to. user is the username to use for logging; authUser is
// the authenticated user, if any; r is the CONNECT request, if any.
func SSLBump(conn net.Conn, serverAddr, user, authUser string, r *http.Request) {
	// Contain panics to this connection; log a stack trace for debugging
	// rather than letting one bad connection kill the process.
	defer func() {
		if err := recover(); err != nil {
			buf := make([]byte, 4096)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("SSLBump: panic serving connection to %s: %v\n%s", serverAddr, err, buf)
			conn.Close()
		}
	}()
	session := &TLSSession{
		ServerAddr: serverAddr,
		User:       authUser,
	}
	if r != nil {
		session.ConnectHeader = r.Header
	}
	// Record the client IP (sans port) for Starlark scripts.
	client := conn.RemoteAddr().String()
	if host, _, err := net.SplitHostPort(client); err == nil {
		session.ClientIP = host
	} else {
		session.ClientIP = client
	}
	obsoleteVersion := false
	invalidSSL := false
	// Read the client hello so that we can find out the name of the server (not
	// just the address). Even on error we keep the bytes read, so they can be
	// replayed to the real server in pass-through mode.
	clientHello, err := readClientHello(conn)
	if err != nil {
		logTLS(user, serverAddr, "", fmt.Errorf("error reading client hello: %v", err), false, "")
		if _, ok := err.(net.Error); ok {
			conn.Close()
			return
		} else if err == ErrObsoleteSSLVersion {
			obsoleteVersion = true
			if getConfig().BlockObsoleteSSL {
				conn.Close()
				return
			}
		} else if err == ErrInvalidSSL {
			invalidSSL = true
		} else {
			conn.Close()
			return
		}
	}
	// NOTE(review): the error from parseClientHello is immediately
	// overwritten by SplitHostPort below; a parse failure just leaves
	// clientHelloInfo nil, which the code checks for before use.
	clientHelloInfo, err := parseClientHello(clientHello)
	host, port, err := net.SplitHostPort(serverAddr)
	if err != nil {
		host = serverAddr
		port = "443"
	}
	serverName := ""
	if !obsoleteVersion && !invalidSSL {
		if clientHelloInfo != nil && clientHelloInfo.ServerName != "" {
			serverName = clientHelloInfo.ServerName
		}
	}
	session.SNI = serverName
	if session.ServerAddr == "" {
		session.ServerAddr = net.JoinHostPort(serverName, "443")
	}
	if serverName == "" {
		serverName = host
		if ip := net.ParseIP(serverName); ip != nil {
			// All we have is an IP address, not a name from a CONNECT request.
			// See if we can do better by reverse DNS.
			names, err := net.LookupAddr(serverName)
			if err == nil && len(names) > 0 {
				serverName = strings.TrimSuffix(names[0], ".")
			}
		}
	}
	if serverName == "" {
		logTLS(user, "", "", errors.New("no SNI available"), false, "")
		conn.Close()
		return
	}
	// Filter a virtual CONNECT request.
	cr := &http.Request{
		Method:     "CONNECT",
		Header:     make(http.Header),
		Host:       net.JoinHostPort(serverName, port),
		URL:        &url.URL{Host: serverName},
		RemoteAddr: conn.RemoteAddr().String(),
	}
	// Compute a JA3 fingerprint of the ClientHello and stash it in the
	// request context so downstream handlers can log it.
	var tlsFingerprint string
	j, err := ja3.ComputeJA3FromSegment(clientHello)
	if err != nil {
		log.Printf("Error generating TLS fingerprint: %v", err)
	} else {
		tlsFingerprint = j.GetJA3Hash()
		ctx := cr.Context()
		ctx = context.WithValue(ctx, tlsFingerprintKey{}, tlsFingerprint)
		cr = cr.WithContext(ctx)
	}
	// Run the virtual CONNECT request through the URL/ACL rules.
	var tally map[rule]int
	var scores map[string]int
	var reqACLs map[string]bool
	{
		conf := getConfig()
		tally = conf.URLRules.MatchingRules(cr.URL)
		scores = conf.categoryScores(tally)
		reqACLs = conf.ACLs.requestACLs(cr, authUser)
		if invalidSSL {
			reqACLs["invalid-ssl"] = true
		}
		if r == nil {
			// It's a transparently-intercepted request instead of a real
			// CONNECT request.
			reqACLs["transparent"] = true
		}
	}
	session.ACLs.data = reqACLs
	session.Scores.data = scores
	// ssl-bump is only offered when we have a signing cert and the client
	// spoke a version of TLS we can actually terminate.
	session.PossibleActions = []string{"allow", "block"}
	if getConfig().TLSReady && !obsoleteVersion && !invalidSSL {
		session.PossibleActions = append(session.PossibleActions, "ssl-bump")
	}
	// Let Starlark scripts inspect and adjust the session before we decide.
	callStarlarkFunctions("ssl_bump", session)
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}
	if session.SourceIP != nil {
		dialer.LocalAddr = &net.TCPAddr{
			IP: session.SourceIP,
		}
	}
	session.chooseAction()
	logAccess(cr, nil, 0, false, user, tally, scores, session.Action, "", session.Ignored, nil)
	switch session.Action.Action {
	case "allow", "":
		// Pass-through: replay the ClientHello to the real server and pipe
		// bytes both ways without decrypting.
		upload, download := connectDirect(conn, session.ServerAddr, clientHello, dialer)
		logAccess(cr, nil, upload+download, false, user, tally, scores, session.Action, "", session.Ignored, nil)
		return
	case "block":
		conn.Close()
		return
	}
	// From here on we are bumping: terminate TLS ourselves and proxy the
	// decrypted traffic through an http.Server.
	var cert tls.Certificate
	var rt http.RoundTripper
	var http2Support bool
	closeChan := make(chan struct{})
	server := &http.Server{
		IdleTimeout: getConfig().CloseIdleConnections,
		ConnState: func(conn net.Conn, state http.ConnState) {
			switch state {
			case http.StateClosed:
				close(closeChan)
			}
		},
	}
	serverConnConfig := &tls.Config{
		ServerName:         session.SNI,
		InsecureSkipVerify: true,
	}
	clientSupportsHTTP2 := false
	if clientHelloInfo != nil {
		for _, p := range clientHelloInfo.SupportedProtos {
			if p == "h2" {
				clientSupportsHTTP2 = true
			}
		}
	}
	if clientSupportsHTTP2 && getConfig().HTTP2Upstream {
		serverConnConfig.NextProtos = []string{"h2", "http/1.1"}
	}
	serverConn, err := tls.DialWithDialer(dialer, "tcp", session.ServerAddr, serverConnConfig)
	if err == nil {
		defer serverConn.Close()
		state := serverConn.ConnectionState()
		serverCert := state.PeerCertificates[0]
		valid := validCert(serverCert, state.PeerCertificates[1:])
		// Present the client a certificate imitating the real server's
		// (self-signed if the real one didn't verify).
		cert, err = imitateCertificate(serverCert, !valid, session.SNI)
		if err != nil {
			logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
			connectDirect(conn, session.ServerAddr, clientHello, dialer)
			return
		}
		http2Support = state.NegotiatedProtocol == "h2" && state.NegotiatedProtocolIsMutual
		// d is used for redialing the upstream if the connection is lost.
		d := &tls.Dialer{
			NetDialer: dialer,
			Config: &tls.Config{
				ServerName: session.SNI,
				RootCAs:    certPoolWith(serverConn.ConnectionState().PeerCertificates),
			},
		}
		if !valid {
			// The original cert didn't verify; on redial, accept only the
			// exact same certificate we saw the first time.
			d.Config.InsecureSkipVerify = true
			originalCert := serverConn.ConnectionState().PeerCertificates[0]
			d.Config.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
				cert, err := x509.ParseCertificate(rawCerts[0])
				if err != nil {
					return err
				}
				if cert.Equal(originalCert) {
					return nil
				}
				return errCertMismatch
			}
		}
		if http2Support {
			d.Config.NextProtos = []string{"h2"}
			// The first DialTLS call reuses serverConn; later calls redial.
			var once sync.Once
			rt = &http2.Transport{
				DialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {
					var c net.Conn
					once.Do(func() {
						c = serverConn
					})
					if c != nil {
						return c, nil
					}
					logVerbose("redial", "Redialing HTTP/2 connection to %s (%s)", session.SNI, session.ServerAddr)
					return d.Dial("tcp", session.ServerAddr)
				},
				TLSClientConfig:            d.Config,
				StrictMaxConcurrentStreams: true,
			}
		} else {
			rt = &connTransport{
				Conn: serverConn,
				Redial: func(ctx context.Context) (net.Conn, error) {
					logVerbose("redial", "Redialing connection to %s (%s)", session.SNI, session.ServerAddr)
					return d.DialContext(ctx, "tcp", session.ServerAddr)
				},
			}
		}
	} else {
		// Couldn't reach the real server; fabricate a certificate and fall
		// back to the shared HTTP transport.
		cert, err = fakeCertificate(session.SNI)
		if err != nil {
			logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error generating certificate: %v", err), false, tlsFingerprint)
			conn.Close()
			return
		}
		rt = httpTransport
	}
	// Freeze the session so Starlark handlers can't mutate it mid-request.
	session.Freeze()
	server.Handler = &proxyHandler{
		TLS:            true,
		tlsFingerprint: tlsFingerprint,
		connectPort:    port,
		user:           authUser,
		rt:             rt,
		session:        session,
	}
	tlsConfig := &tls.Config{
		Certificates:             []tls.Certificate{cert, getConfig().TLSCert},
		PreferServerCipherSuites: true,
		CurvePreferences: []tls.CurveID{
			tls.CurveP256,
			tls.X25519, // Go 1.8 only
		},
	}
	http2Downstream := getConfig().HTTP2Downstream && http2Support
	if http2Downstream {
		tlsConfig.NextProtos = []string{"h2", "http/1.1"}
	}
	// insertingConn replays the already-consumed ClientHello bytes so the
	// TLS server sees the full handshake stream.
	tlsConn := tls.Server(&insertingConn{conn, clientHello}, tlsConfig)
	err = tlsConn.Handshake()
	if err != nil {
		logTLS(user, session.ServerAddr, serverName, fmt.Errorf("error in handshake with client: %v", err), false, tlsFingerprint)
		conn.Close()
		return
	}
	logTLS(user, session.ServerAddr, serverName, nil, false, tlsFingerprint)
	if http2Downstream {
		http2.ConfigureServer(server, nil)
	}
	// Serve exactly this one connection through the HTTP server.
	listener := &singleListener{conn: tlsConn}
	server.Serve(listener)
	// Wait for the connection to finish.
	<-closeChan
}
// A TLSSession is the parameter for the Starlark ssl_bump function.
type TLSSession struct {
	// SNI is the server name from the client's TLS ClientHello, if any.
	SNI string
	// ServerAddr is the host:port address the client was trying to reach.
	ServerAddr string
	// User is the authenticated username, if any.
	User string
	// ClientIP is the client's IP address (without the port).
	ClientIP string
	// SourceIP is the IP address of the network interface to be used to dial
	// the upstream connection.
	SourceIP net.IP
	// ConnectHeader is the header from the CONNECT request, if any.
	ConnectHeader http.Header
	scoresAndACLs
	// frozen is set by Freeze; once set, SetField refuses assignments.
	frozen bool
	// misc holds arbitrary values stored by Starlark scripts.
	misc SyncDict
}
// scoresAndACLs bundles the filtering state for a connection: the ACLs that
// matched, the per-category scores, and the chosen action.
type scoresAndACLs struct {
	// ACLs is the set of ACL names that matched.
	ACLs StringSet
	// Scores holds the per-category filtering scores.
	Scores StringIntDict
	// Tally counts rule matches per rule.
	Tally map[rule]int
	// PossibleActions lists the actions that may be assigned.
	PossibleActions []string
	// Action is the chosen action; empty until one is chosen or set.
	Action ACLActionRule
	// Ignored is the list returned alongside the chosen action by
	// ChooseACLCategoryAction.
	Ignored []string
}
// currentAction returns the action for this connection. If one has already
// been assigned (by chooseAction or setAction), it is returned; otherwise the
// configuration's ACL/category logic picks one, defaulting to "allow".
func (s *scoresAndACLs) currentAction() (ar ACLActionRule, ignored []string) {
	// A previously assigned action takes precedence.
	if s.Action.Action != "" {
		return s.Action, s.Ignored
	}
	conf := getConfig()
	ar, ignored = conf.ChooseACLCategoryAction(s.ACLs.data, s.Scores.data, conf.Threshold, s.PossibleActions...)
	if ar.Action == "" {
		ar.Action = "allow"
	}
	return ar, ignored
}
// chooseAction decides the action for this connection and stores the result,
// making it the permanent answer for subsequent currentAction calls.
func (s *scoresAndACLs) chooseAction() {
	s.Action, s.Ignored = s.currentAction()
}
// setAction sets the connection's action to newAction, which must be one of
// PossibleActions. If it isn't, an error is returned and the current action
// is left unchanged.
func (s *scoresAndACLs) setAction(newAction string) error {
	allowed := false
	for _, candidate := range s.PossibleActions {
		if candidate == newAction {
			allowed = true
			break
		}
	}
	if !allowed {
		return fmt.Errorf("can't set action to %q; expected one of %q", newAction, s.PossibleActions)
	}
	s.Action = ACLActionRule{
		Action: newAction,
		Needed: []string{"starlark"},
	}
	return nil
}
// String implements starlark.Value.
func (s *TLSSession) String() string {
	return fmt.Sprintf("TLSSession(%q, %q)", s.SNI, s.ServerAddr)
}
// Type implements starlark.Value.
func (s *TLSSession) Type() string {
	return "TLSSession"
}
// Freeze implements starlark.Value. It makes the session immutable,
// freezing the ACL and score collections along with it.
func (s *TLSSession) Freeze() {
	if !s.frozen {
		s.frozen = true
		s.ACLs.Freeze()
		s.Scores.Freeze()
	}
}
// Truth implements starlark.Value; a TLSSession is always truthy.
func (s *TLSSession) Truth() starlark.Bool {
	return starlark.True
}
func (s *TLSSession) Hash() (uint32, error) |
// tlsSessionAttrNames lists the attribute names exposed to Starlark by Attr.
var tlsSessionAttrNames = []string{"sni", "server_addr", "user", "client_ip", "acls", "scores", "source_ip", "action", "possible_actions", "header", "misc"}

// AttrNames implements starlark.HasAttrs.
func (s *TLSSession) AttrNames() []string {
	return tlsSessionAttrNames
}
// Attr implements starlark.HasAttrs, exposing the session's fields to
// Starlark code under the names listed in tlsSessionAttrNames.
func (s *TLSSession) Attr(name string) (starlark.Value, error) {
	switch name {
	case "sni":
		return starlark.String(s.SNI), nil
	case "server_addr":
		return starlark.String(s.ServerAddr), nil
	case "user":
		return starlark.String(s.User), nil
	case "client_ip":
		return starlark.String(s.ClientIP), nil
	case "source_ip":
		// NOTE(review): when SourceIP is unset, net.IP.String on a nil IP
		// yields "<nil>" rather than "" — confirm Starlark callers expect that.
		return starlark.String(s.SourceIP.String()), nil
	case "acls":
		return &s.ACLs, nil
	case "scores":
		return &s.Scores, nil
	case "action":
		// The action may not have been chosen yet; currentAction computes it
		// on demand without storing it.
		ar, _ := s.currentAction()
		return starlark.String(ar.Action), nil
	case "possible_actions":
		return stringTuple(s.PossibleActions), nil
	case "header":
		return &HeaderDict{data: s.ConnectHeader}, nil
	case "misc":
		return &s.misc, nil
	default:
		// (nil, nil) signals "no such attribute" per the starlark.HasAttrs
		// contract.
		return nil, nil
	}
}
// SetField implements starlark.HasSetField. Only sni, server_addr, source_ip,
// and action may be assigned, and only before the session is frozen.
func (s *TLSSession) SetField(name string, val starlark.Value) error {
	if s.frozen {
		return errors.New("can't set a field of a frozen object")
	}
	switch name {
	case "sni":
		return assignStarlarkString(&s.SNI, val)
	case "server_addr":
		return assignStarlarkString(&s.ServerAddr, val)
	case "source_ip":
		// Parse the string into a net.IP; reject anything unparsable.
		var ip string
		if err := assignStarlarkString(&ip, val); err != nil {
			return err
		}
		parsed := net.ParseIP(ip)
		if parsed == nil {
			return fmt.Errorf("%q is not a valid IP address", ip)
		}
		s.SourceIP = parsed
		return nil
	case "action":
		// setAction validates the value against PossibleActions.
		var newAction string
		if err := assignStarlarkString(&newAction, val); err != nil {
			return err
		}
		return s.setAction(newAction)
	default:
		return starlark.NoSuchAttrError(fmt.Sprintf("can't assign to .%s field of TLSSession", name))
	}
}
var errCertMismatch = errors.New("server certificate changed between original connection and redial")
func certPoolWith(certs []*x509.Certificate) *x509.CertPool {
pool := x509.NewCertPool()
for _, c := range certs {
pool.AddCert(c)
}
return pool
}
// An insertingConn is a net.Conn that inserts extra data at the start of the
// incoming data stream.
type insertingConn struct {
	net.Conn
	extraData []byte
}

// Read serves bytes from extraData until it is exhausted, then delegates to
// the underlying connection.
func (c *insertingConn) Read(p []byte) (int, error) {
	if len(c.extraData) > 0 {
		n := copy(p, c.extraData)
		c.extraData = c.extraData[n:]
		return n, nil
	}
	return c.Conn.Read(p)
}
// A singleListener is a net.Listener that returns a single connection, then
// gives the error io.EOF.
type singleListener struct {
conn net.Conn
once sync.Once
}
func (s *singleListener) Accept() (net.Conn, error) {
var c net.Conn
s.once.Do(func() {
c = s.conn
})
if c != nil {
return c, nil
}
return nil, io.EOF
}
func (s *singleListener) Close() error {
s.once.Do(func() {
s.conn.Close()
})
return nil
}
func (s *singleListener) Addr() net.Addr {
return s.conn.LocalAddr()
}
// imitateCertificate returns a new TLS certificate that has most of the same
// data as serverCert but is signed by Redwood's root certificate, or
// self-signed.
func imitateCertificate(serverCert *x509.Certificate, selfSigned bool, sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// Use a hash of the real certificate (plus some other things) as the serial number.
	// Including our signing chain and the SNI makes the serial change whenever
	// either does. (MD5 is used only as an identifier here, not for security.)
	h := md5.New()
	h.Write(serverCert.Raw)
	for _, c := range conf.TLSCert.Certificate {
		h.Write(c)
	}
	if sni != "" {
		io.WriteString(h, sni)
	}
	// Copy the fields of the real certificate that clients care about.
	template := &x509.Certificate{
		SerialNumber:                big.NewInt(0).SetBytes(h.Sum(nil)),
		Subject:                     serverCert.Subject,
		NotBefore:                   serverCert.NotBefore,
		NotAfter:                    serverCert.NotAfter,
		KeyUsage:                    x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:                 serverCert.ExtKeyUsage,
		UnknownExtKeyUsage:          serverCert.UnknownExtKeyUsage,
		BasicConstraintsValid:       false,
		SubjectKeyId:                nil,
		DNSNames:                    serverCert.DNSNames,
		PermittedDNSDomainsCritical: serverCert.PermittedDNSDomainsCritical,
		PermittedDNSDomains:         serverCert.PermittedDNSDomains,
		SignatureAlgorithm:          x509.UnknownSignatureAlgorithm,
	}
	// If sni is not blank, make a certificate that covers only that domain,
	// instead of all the domains covered by the original certificate.
	if sni != "" {
		template.DNSNames = []string{sni}
		template.Subject.CommonName = sni
	}
	var newCertBytes []byte
	if selfSigned {
		// Self-signed: the template acts as its own parent.
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, template, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	} else {
		newCertBytes, err = x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	}
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey:  conf.TLSCert.PrivateKey,
	}
	if !selfSigned {
		// Append our signing chain so clients can build a path to our root.
		newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	}
	return newCert, nil
}
// fakeCertificate returns a fabricated certificate for the server identified by sni.
// It is used when the real server's certificate is unavailable. The
// certificate is valid from the start of today until one month out, covers
// only sni, and is signed by the configured root certificate.
func fakeCertificate(sni string) (cert tls.Certificate, err error) {
	conf := getConfig()
	// Random serial number in [0, 2^62).
	serial, err := rand.Int(rand.Reader, big.NewInt(1<<62))
	if err != nil {
		return tls.Certificate{}, err
	}
	y, m, d := time.Now().Date()
	template := &x509.Certificate{
		SerialNumber:       serial,
		Subject:            pkix.Name{CommonName: sni},
		NotBefore:          time.Date(y, m, d, 0, 0, 0, 0, time.Local),
		NotAfter:           time.Date(y, m+1, d, 0, 0, 0, 0, time.Local),
		KeyUsage:           x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		DNSNames:           []string{sni},
		SignatureAlgorithm: x509.UnknownSignatureAlgorithm,
	}
	newCertBytes, err := x509.CreateCertificate(rand.Reader, template, conf.ParsedTLSCert, conf.ParsedTLSCert.PublicKey, conf.TLSCert.PrivateKey)
	if err != nil {
		return tls.Certificate{}, err
	}
	newCert := tls.Certificate{
		Certificate: [][]byte{newCertBytes},
		PrivateKey:  conf.TLSCert.PrivateKey,
	}
	// Append our signing chain so clients can verify up to our root.
	newCert.Certificate = append(newCert.Certificate, conf.TLSCert.Certificate...)
	return newCert, nil
}
// validCert checks whether cert chains to a trusted root, using intermediates
// (and, if necessary, intermediates fetched from the URLs listed in the
// certificates) to build the chain. It also returns true if verification
// fails for some reason other than an unrecognized CA — in that case we go
// ahead and use the cert and let the client experience the same error.
func validCert(cert *x509.Certificate, intermediates []*x509.Certificate) bool {
	conf := getConfig()
	pool := certPoolWith(intermediates)
	if certVerifyAcceptable(cert, x509.VerifyOptions{Intermediates: pool}) {
		return true
	}
	if conf.ExtraRootCerts != nil && certVerifyAcceptable(cert, x509.VerifyOptions{Roots: conf.ExtraRootCerts, Intermediates: pool}) {
		return true
	}
	// Before we give up, we'll try fetching some intermediate certificates.
	if len(cert.IssuingCertificateURL) == 0 {
		return false
	}
	fetchIntermediates(cert.IssuingCertificateURL, pool)
	return certVerifyAcceptable(cert, x509.VerifyOptions{Intermediates: pool})
}

// certVerifyAcceptable runs cert.Verify with opts and reports whether the
// outcome is acceptable: either verification succeeded, or it failed for a
// reason other than the certificate not being signed by a recognized CA.
func certVerifyAcceptable(cert *x509.Certificate, opts x509.VerifyOptions) bool {
	_, err := cert.Verify(opts)
	if err == nil {
		return true
	}
	_, unknownAuthority := err.(x509.UnknownAuthorityError)
	return !unknownAuthority
}

// fetchIntermediates downloads the certificates listed in urls (and,
// recursively, any certificates they reference) and adds them to pool.
// Fetch and parse failures are silently skipped; the whole operation is
// best-effort.
func fetchIntermediates(urls []string, pool *x509.CertPool) {
	// Copy the URL list so appends below can't scribble on the caller's
	// certificate (append on cert.IssuingCertificateURL could reuse its
	// backing array).
	toFetch := append([]string(nil), urls...)
	fetched := make(map[string]bool)
	for i := 0; i < len(toFetch); i++ {
		certURL := toFetch[i]
		if fetched[certURL] {
			continue
		}
		// Mark the URL before fetching so that certificates referencing each
		// other (or themselves) can't cause repeated fetches or an unbounded
		// work list. (The original code never wrote to this map.)
		fetched[certURL] = true
		resp, err := http.Get(certURL)
		if err != nil {
			continue
		}
		if resp.StatusCode != 200 {
			resp.Body.Close()
			continue
		}
		fetchedCert, err := ioutil.ReadAll(resp.Body)
		// Close the body immediately instead of deferring: a defer in this
		// loop would hold every connection open until the function returns.
		resp.Body.Close()
		if err != nil {
			continue
		}
		// The fetched certificate might be in either DER or PEM format.
		if bytes.Contains(fetchedCert, []byte("-----BEGIN CERTIFICATE-----")) {
			// It's PEM; the file may hold several blocks.
			var certDER *pem.Block
			for {
				certDER, fetchedCert = pem.Decode(fetchedCert)
				if certDER == nil {
					break
				}
				if certDER.Type != "CERTIFICATE" {
					continue
				}
				thisCert, err := x509.ParseCertificate(certDER.Bytes)
				if err != nil {
					continue
				}
				pool.AddCert(thisCert)
				toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
			}
		} else {
			// Hopefully it's DER.
			thisCert, err := x509.ParseCertificate(fetchedCert)
			if err != nil {
				continue
			}
			pool.AddCert(thisCert)
			toFetch = append(toFetch, thisCert.IssuingCertificateURL...)
		}
	}
}
var ErrObsoleteSSLVersion = errors.New("obsolete SSL protocol version")
var ErrInvalidSSL = errors.New("invalid first byte for SSL connection; possibly some other protocol")
func readClientHello(conn net.Conn) (hello []byte, err error) {
conn.SetReadDeadline(time.Now().Add(10 * time.Second))
defer conn.SetReadDeadline(time.Time{})
var header [5]byte
n, err := io.ReadFull(conn, header[:])
hello = header[:n]
if err != nil {
return hello, err
}
if header[0] != 22 {
if header[0] == 128 {
return hello, ErrObsoleteSSLVersion
}
return hello, ErrInvalidSSL
}
if header[1] != 3 {
return hello, fmt.Errorf("expected major version of 3, got %d", header[1])
}
recordLen := int(header[3])<<8 | int(header[4])
if recordLen > 0x3000 {
return hello, fmt.Errorf("expected length less than 12kB, got %d", recordLen)
}
if recordLen < 4 {
return hello, fmt.Errorf("expected length of at least 4 bytes, got %d", recordLen)
}
protocolData := make([]byte, recordLen)
n, err = io.ReadFull(conn, protocolData)
hello = append(hello, protocolData[:n]...)
if err != nil {
return hello, err
}
if protocolData[0] != 1 {
return hello, fmt.Errorf("Expected message type 1 (ClientHello), got %d", protocolData[0])
}
protocolLen := int(protocolData[1])<<16 | int(protocolData[2])<<8 | int(protocolData[3])
if protocolLen != recordLen-4 {
return hello, fmt.Errorf("recordLen=%d, protocolLen=%d", recordLen, protocolLen)
}
return hello, nil
}
// parseClientHello parses some useful information out of a ClientHello message.
// It returns a ClientHelloInfo with only the following fields filled in:
// ServerName and SupportedProtos.
func parseClientHello(data []byte) (*tls.ClientHelloInfo, error) {
	// The implementation of this function is based on crypto/tls.clientHelloMsg.unmarshal
	var info tls.ClientHelloInfo
	s := cryptobyte.String(data)
	// Skip the 5-byte record header, plus message type (1), length (3),
	// version (2), and random (32) — 43 bytes in all.
	if !s.Skip(43) {
		return nil, errors.New("too short")
	}
	var sessionID cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&sessionID) {
		return nil, errors.New("bad session ID")
	}
	var cipherSuites cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&cipherSuites) {
		return nil, errors.New("bad cipher suites")
	}
	var compressionMethods cryptobyte.String
	if !s.ReadUint8LengthPrefixed(&compressionMethods) {
		return nil, errors.New("bad compression methods")
	}
	if s.Empty() {
		// no extensions
		return &info, nil
	}
	// The extension block must account for all remaining bytes.
	var extensions cryptobyte.String
	if !s.ReadUint16LengthPrefixed(&extensions) || !s.Empty() {
		return nil, errors.New("bad extensions")
	}
	for !extensions.Empty() {
		var extension uint16
		var extData cryptobyte.String
		if !extensions.ReadUint16(&extension) || !extensions.ReadUint16LengthPrefixed(&extData) {
			return nil, errors.New("bad extension")
		}
		switch extension {
		case 0: // server name
			var nameList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&nameList) || nameList.Empty() {
				return nil, errors.New("bad name list")
			}
			for !nameList.Empty() {
				var nameType uint8
				var serverName cryptobyte.String
				if !nameList.ReadUint8(&nameType) || !nameList.ReadUint16LengthPrefixed(&serverName) || serverName.Empty() {
					return nil, errors.New("bad entry in name list")
				}
				// Only name type 0 (host_name) is meaningful.
				if nameType != 0 {
					continue
				}
				if info.ServerName != "" {
					return nil, errors.New("multiple server names")
				}
				info.ServerName = string(serverName)
				if strings.HasSuffix(info.ServerName, ".") {
					return nil, errors.New("server name ends with dot")
				}
			}
		case 16: // ALPN
			var protoList cryptobyte.String
			if !extData.ReadUint16LengthPrefixed(&protoList) || protoList.Empty() {
				return nil, errors.New("bad ALPN protocol list")
			}
			for !protoList.Empty() {
				var proto cryptobyte.String
				if !protoList.ReadUint8LengthPrefixed(&proto) || proto.Empty() {
					return nil, errors.New("bad ALPN protocol list entry")
				}
				info.SupportedProtos = append(info.SupportedProtos, string(proto))
			}
		default:
			// ignore other extensions (and skip the extData consistency check
			// below, since we didn't consume their data)
			continue
		}
		if !extData.Empty() {
			return nil, errors.New("extra data at end of extension")
		}
	}
	return &info, nil
}
// addTrustedRoots reads the PEM file at certPath and adds the certificates it
// contains to the config's ExtraRootCerts pool, creating the pool if needed.
// It returns an error if the file can't be read or contains no certificates.
func (c *config) addTrustedRoots(certPath string) error {
	if c.ExtraRootCerts == nil {
		c.ExtraRootCerts = x509.NewCertPool()
	}
	// Renamed from "pem" to avoid shadowing the imported encoding/pem package.
	pemData, err := ioutil.ReadFile(certPath)
	if err != nil {
		return err
	}
	if !c.ExtraRootCerts.AppendCertsFromPEM(pemData) {
		return fmt.Errorf("no certificates found in %s", certPath)
	}
	return nil
}
| {
return 0, errors.New("unhashable type: TLSSession")
} | identifier_body |
kek.py | #Импортирование библиотек
import PySimpleGUI as sg
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from math import log
import numpy as np
import pandas as pd
import nltk
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
import pickle
# Save the classifier's trained state to disk (sis.pickle).
def save(obj):
    # pickle serializes the whole object graph in one call.
    with open('sis.pickle', 'wb') as f:
        pickle.dump(obj, f)
# Load the classifier's trained state back from disk (sis.pickle).
def load():
    with open('sis.pickle', 'rb') as f:
        obj_new = pickle.load(f)
    return obj_new
# Visualize the spam-word vocabulary as a word cloud.
# spam_words: a single space-joined string of all words from spam messages.
def show_spam(spam_words):
    spam_wc = WordCloud(width = 512,height = 512).generate(spam_words)
    plt.figure(figsize = (10, 8), facecolor = 'k')
    plt.imshow(spam_wc)
    plt.axis('off')
    plt.tight_layout(pad = 0)
    plt.show()
# Visualize the legitimate (ham) word vocabulary as a word cloud.
# ham_words: a single space-joined string of all words from ham messages.
def show_ham(ham_words):
    ham_wc = WordCloud(width = 512,height = 512).generate(ham_words)
    plt.figure(figsize = (10, 8), facecolor = 'k')
    plt.imshow(ham_wc)
    plt.axis('off')
    plt.tight_layout(pad = 0)
    plt.show()
#Чтение данных из таблицы
oldmails = pd.read_csv('spam.csv', encoding = 'latin-1')
oldmails.head()
mailz = pd.read_csv('messages.csv', encoding = 'latin-1')
mailz.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1, inplace = True)
oldmails.head()
mailz.drop(['subject'], axis = 1, inplace = True)
mailz.head()
#Преобразовани таблицы с данными, переименование столбцов
oldmails.rename(columns = {'v1': 'labels', 'v2': 'message'}, inplace = True)
oldmails.head()
oldmails['labels'].value_counts()
mailz['label'].value_counts()
#Преобразовани таблицы с данными, переименование значений столбцов
oldmails['label'] = oldmails['labels'].map({'ham': 0, 'spam': 1})
oldmails.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['labels'], axis = 1, inplace = True)
oldmails.head()
#Преобразовани таблицы с данными, слияние двух массивов для обучения
mails = pd.concat((mailz, oldmails), ignore_index=True)
#Разбиение данных на два массива
totalMails = (int(len(mails))-1)
trainIndex, testIndex = list(), list()
for i in range(mails.shape[0]):
if np.random.uniform(0, 1) < 0.75:
trainIndex += [i]
else:
testIndex += [i]
trainData = mails.loc[trainIndex]
testData = mails.loc[testIndex]
#Отображение данных в таблице
trainData.reset_index(inplace = True)
trainData.drop(['index'], axis = 1, inplace = True)
trainData.head()
testData.reset_index(inplace = True)
testData.drop(['index'], axis = 1, inplace = True)
testData.head()
#Отображение набора тренировочных данных
trainData['label'].value_counts()
#Отображение набора данных для тестирования
testData['label'].value_counts()
#Формирование словарей спам и не спам слов
spam_words = ' '.join(list(mails[mails['label'] == 1]['message']))
ham_words = ' '.join(list(mails[mails['label'] == 0]['message']))
trainData.head()
trainData['label'].value_counts()
testData.head()
testData['label'].value_counts()
# Tokenize and normalize a message for classification.
# Steps: optional lowercasing, word tokenization, dropping words of <= 2
# characters, then either building n-grams (when gram > 1) or removing
# English stopwords and applying Porter stemming.
# NOTE(review): when gram > 1 the function returns n-grams of the raw tokens
# and skips the stopword/stemming steps entirely — preserved as-is, but worth
# confirming this is intentional.
def process_message(message, lower_case = True, stem = True, stop_words = True, gram = 2):
    if lower_case:
        message = message.lower()
    words = word_tokenize(message)
    words = [w for w in words if len(w) > 2]
    if gram > 1:
        # Build overlapping n-grams of `gram` consecutive words.
        w = []
        for i in range(len(words) - gram + 1):
            w += [' '.join(words[i:i + gram])]
        return w
    if stop_words:
        sw = stopwords.words('english')
        words = [word for word in words if word not in sw]
    if stem:
        stemmer = PorterStemmer()
        words = [stemmer.stem(word) for word in words]
    # Removed a leftover debug print(words) that wrote every processed
    # message to stdout.
    return words
#Классификация данных
class SpamClassifier(object):
def __init__(self, trainData, method='tf-idf'):
self.mails, self.labels = trainData['message'], trainData['label']
self.method = method
#Функция обучения
def train(self):
self.calc_TF_and_IDF()
if self.method == 'tf-idf':
self.calc_TF_IDF()
else:
self.calc_prob()
def calc_prob(self):
self.prob_spam = dict()
self.prob_ham = dict()
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word] + 1) / (self.spam_words + \
len(list(self.tf_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word] + 1) / (self.ham_words + \
len(list(self.tf_ham.keys())))
self.prob_spam_mail, self.prob_ham_mail = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Вычисление вероятностей
def calc_TF_and_IDF(self):
noOfMessages = self.mails.shape[0]
self.spam_mails, self.ham_mails = self.labels.value_counts()[1], self.labels.value_counts()[0]
self.total_mails = self.spam_mails + self.ham_mails
self.spam_words = 0
self.ham_words = 0
self.tf_spam = dict()
self.tf_ham = dict()
self.idf_spam = dict()
self.idf_ham = dict()
for i in range(noOfMessages):
message_processed = process_message(self.mails[i])
count = list()
for word in message_processed:
if self.labels[i]:
self.tf_spam[word] = self.tf_spam.get(word, 0) + 1
self.spam_words += 1
else:
self.tf_ham[word] = self.tf_ham.get(word, 0) + 1
self.ham_words += 1
if word not in count:
count += [word]
for word in count:
if self.labels[i]:
self.idf_spam[word] = self.idf_spam.get(word, 0) + 1
else:
self.idf_ham[word] = self.idf_ham.get(word, 0) + 1
def calc_TF_IDF(self):
self.prob_spam = dict()
self.prob_ham = dict()
self.sum_tf_idf_spam = 0
self.sum_tf_idf_ham = 0
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam[word] + self.idf_ham.get(word, 0)))
self.sum_tf_idf_spam += self.prob_spam[word]
for word in self.tf_spam:
self.prob_spam[word] = (self.prob_spam[word] + 1) / (
self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam.get(word, 0) + self.idf_ham[word]))
self.sum_tf_idf_ham += self.prob_ham[word]
for word in self.tf_ham:
self.prob_ham[word] = (self.prob_ham[word] + 1) / (self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
self.prob_spam_mail, self.prob_ham_mail = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Непосредственно функция классификации на основе теоремы Байеса
def classify(self, processed_message):
pSpam, pHam = 0, 0
for word in processed_message:
if word in self.prob_spam:
pSpam += log(self.prob_spam[word])
else:
if self.method == 'tf-idf':
pSpam -= log(self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
else:
pSpam -= log(self.spam_words + len(list(self.prob_spam.keys())))
if word in self.prob_ham:
pHam += log(self.prob_ham[word])
else:
if self.method == 'tf-idf':
pHam -= log(self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
else:
pHam -= log(self.ham_words + len(list(self.prob_ham.keys())))
pSpam += log(self.prob_spam_mail)
pHam += log(self.prob_ham_mail)
return pSpam >= pHam
#Функция предсказания является ли сообщение спамом или нет
def predict(self, testData):
result = dict()
for (i, message) in enumerate(testData):
processed_message = process_message(message)
result[i] = int(self.classify(processed_message))
return result
#Функция вычисления качества работы алгоритма
def metrics(labels, predictions):
true_pos, true_neg, false_pos, false_neg = 0, 0, 0, 0
for i in range(len(labels)):
true_pos += int(labels[i] == 1 and predictions[i] == 1)
true_neg += int(labels[i] == 0 and predictions[i] == 0)
false_pos += int(labels[i] == 0 and predictions[i] == 1)
false_neg += int(labels[i] == 1 and predictions[i] == 0)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
Fscore = 2 * precision * recall / (precision + recall)
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
return precision, recall, Fscore, accuracy
df = mails
#Обработка сообщений с помощью библиотек
df['message'] = df.message.map(lambda x: x.lower())
df['message'] = df.message.str.replace('[^\w\s]', '')
df['message'] = df['message'].apply(nltk.word_tokenize)
stemmer = PorterStemmer()
df['message'] = df['message'].apply(lambda x: [stemmer.stem(y) for y in x])
df['message'] = df['message'].apply(lambda x: ' '.join(x))
#Преобразование сообщений в таблицу векторов
count_vect = CountVectorizer()
counts = count_vect.fit_transform(df['message'])
transformer = TfidfTransformer().fit(counts)
counts = transformer.transform(counts)
#Разбиение данных на обучающий и тестирующие наборы с использованием библиотек
X_train, X_test, y_train, y_test = train_test_split(counts, df['label'], test_size=0.1, random_state=69)
#Классификация данных с помощью библиотеки scikitlearn
model = MultinomialNB().fit(X_train, y_train)
#Вычисление качества работы алгоритма библиотеки
predicted = model.predict(X_test)
#Интерфейс программы
layout = [
[sg.Button('Обучение'), sg.Button('Показать спам слова'), sg.Button('Показать не спам слова')],
[sg.Text('Введите сообщение для проверки на спамовость')],
[sg.Input(size=(50, 30), key='-IN-')],
[sg.Button('Проверить'), sg.Button('Выход'), sg.Button('Посчитать метрики')]
]
window = sg.Window('Настройка классификатора', layout)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Выход':
break
if event == 'Посчитать метрики':
sc_tf_idf = load()
preds_tf_idf = sc_tf_idf.predict(testData['message'])
precision, recall, Fscore, accuracy = metrics(testData['label'], preds_tf_idf)
sg.popup('Метрики',
"Точность:", precision,
"Полнота:", recall,
"F-мера:", Fscore,
"Численная оценка качества алгоритма:", accuracy,
"Точность классификации для тестового набора данных:", np.mean(predicted == y_test),
"Размер тестовой выборки:", len(y_test),
"Количество легитимных писем попавших в не спам:", confusion_matrix(y_test, predicted)[0][0],
"Количество легитимных писем попавших в спам:", confusion_matrix(y_test, predicted)[0][1],
"Количество спам писем попавших в не спам:", confusion_matrix(y_test, predicted)[1][0],
"Количество спам писем попавших в спам:", confusion_matrix(y_test, predicted)[1][1])
if event == 'Проверить':
text_input = values['-IN-']
pm = process_message(text_input)
sc_tf_idf = load()
sc_tf_idf.classify(pm)
if sc_tf_idf.classify(pm) == True:
sg.popup('Спам')
else:
sg.popup('Не спам')
if event == 'Показать спам слова':
show_spam(spam_words)
if event == 'Показать не спам слова':
show_ham(ham_words)
if event == 'Обучение':
sc_tf_idf = SpamClassifier(trainData, 'tf-idf')
sc_tf_idf.train()
save(sc_tf_idf)
window.close() | ts()
#Формирован | conditional_block |
kek.py | #Импортирование библиотек
import PySimpleGUI as sg
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from math import log
import numpy as np
import pandas as pd
import nltk
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
import pickle
#Функция сохранения состояния обученности классификатора
def save(obj):
with open('sis.pickle', 'wb') as f:
pickle.dump(obj, f)
#Функция загрузки состояния обученности классификатора
def load():
with open('sis.pickle', 'rb') as f:
obj_new = pickle.load(f)
return obj_new
#Функция визуализации словаря спам | ate(spam_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(spam_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
#Функция визуализации словаря легетимных слов
def show_ham(ham_words):
ham_wc = WordCloud(width = 512,height = 512).generate(ham_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(ham_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
#Чтение данных из таблицы
oldmails = pd.read_csv('spam.csv', encoding = 'latin-1')
oldmails.head()
mailz = pd.read_csv('messages.csv', encoding = 'latin-1')
mailz.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1, inplace = True)
oldmails.head()
mailz.drop(['subject'], axis = 1, inplace = True)
mailz.head()
#Преобразовани таблицы с данными, переименование столбцов
oldmails.rename(columns = {'v1': 'labels', 'v2': 'message'}, inplace = True)
oldmails.head()
oldmails['labels'].value_counts()
mailz['label'].value_counts()
#Преобразовани таблицы с данными, переименование значений столбцов
oldmails['label'] = oldmails['labels'].map({'ham': 0, 'spam': 1})
oldmails.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['labels'], axis = 1, inplace = True)
oldmails.head()
#Преобразовани таблицы с данными, слияние двух массивов для обучения
mails = pd.concat((mailz, oldmails), ignore_index=True)
#Разбиение данных на два массива
totalMails = (int(len(mails))-1)
trainIndex, testIndex = list(), list()
for i in range(mails.shape[0]):
if np.random.uniform(0, 1) < 0.75:
trainIndex += [i]
else:
testIndex += [i]
trainData = mails.loc[trainIndex]
testData = mails.loc[testIndex]
#Отображение данных в таблице
trainData.reset_index(inplace = True)
trainData.drop(['index'], axis = 1, inplace = True)
trainData.head()
testData.reset_index(inplace = True)
testData.drop(['index'], axis = 1, inplace = True)
testData.head()
#Отображение набора тренировочных данных
trainData['label'].value_counts()
#Отображение набора данных для тестирования
testData['label'].value_counts()
#Формирование словрей спам и не спам слов
spam_words = ' '.join(list(mails[mails['label'] == 1]['message']))
ham_words = ' '.join(list(mails[mails['label'] == 0]['message']))
trainData.head()
trainData['label'].value_counts()
testData.head()
testData['label'].value_counts()
#Обработка текста сообщений
def process_message(message, lower_case = True, stem = True, stop_words = True, gram = 2):
if lower_case:
message = message.lower()
words = word_tokenize(message)
words = [w for w in words if len(w) > 2]
if gram > 1:
w = []
for i in range(len(words) - gram + 1):
w += [' '.join(words[i:i + gram])]
return w
if stop_words:
sw = stopwords.words('english')
words = [word for word in words if word not in sw]
if stem:
stemmer = PorterStemmer()
words = [stemmer.stem(word) for word in words]
print(words)
return words
#Классификация данных
class SpamClassifier(object):
def __init__(self, trainData, method='tf-idf'):
self.mails, self.labels = trainData['message'], trainData['label']
self.method = method
#Функция обучения
def train(self):
self.calc_TF_and_IDF()
if self.method == 'tf-idf':
self.calc_TF_IDF()
else:
self.calc_prob()
def calc_prob(self):
self.prob_spam = dict()
self.prob_ham = dict()
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word] + 1) / (self.spam_words + \
len(list(self.tf_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word] + 1) / (self.ham_words + \
len(list(self.tf_ham.keys())))
self.prob_spam_mail, self.prob_ham_mail = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Вычисление вероятностей
def calc_TF_and_IDF(self):
noOfMessages = self.mails.shape[0]
self.spam_mails, self.ham_mails = self.labels.value_counts()[1], self.labels.value_counts()[0]
self.total_mails = self.spam_mails + self.ham_mails
self.spam_words = 0
self.ham_words = 0
self.tf_spam = dict()
self.tf_ham = dict()
self.idf_spam = dict()
self.idf_ham = dict()
for i in range(noOfMessages):
message_processed = process_message(self.mails[i])
count = list()
for word in message_processed:
if self.labels[i]:
self.tf_spam[word] = self.tf_spam.get(word, 0) + 1
self.spam_words += 1
else:
self.tf_ham[word] = self.tf_ham.get(word, 0) + 1
self.ham_words += 1
if word not in count:
count += [word]
for word in count:
if self.labels[i]:
self.idf_spam[word] = self.idf_spam.get(word, 0) + 1
else:
self.idf_ham[word] = self.idf_ham.get(word, 0) + 1
def calc_TF_IDF(self):
self.prob_spam = dict()
self.prob_ham = dict()
self.sum_tf_idf_spam = 0
self.sum_tf_idf_ham = 0
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam[word] + self.idf_ham.get(word, 0)))
self.sum_tf_idf_spam += self.prob_spam[word]
for word in self.tf_spam:
self.prob_spam[word] = (self.prob_spam[word] + 1) / (
self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam.get(word, 0) + self.idf_ham[word]))
self.sum_tf_idf_ham += self.prob_ham[word]
for word in self.tf_ham:
self.prob_ham[word] = (self.prob_ham[word] + 1) / (self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
self.prob_spam_mail, self.prob_ham_mail = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Непосредственно функция классификации на основе теоремы Байеса
def classify(self, processed_message):
pSpam, pHam = 0, 0
for word in processed_message:
if word in self.prob_spam:
pSpam += log(self.prob_spam[word])
else:
if self.method == 'tf-idf':
pSpam -= log(self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
else:
pSpam -= log(self.spam_words + len(list(self.prob_spam.keys())))
if word in self.prob_ham:
pHam += log(self.prob_ham[word])
else:
if self.method == 'tf-idf':
pHam -= log(self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
else:
pHam -= log(self.ham_words + len(list(self.prob_ham.keys())))
pSpam += log(self.prob_spam_mail)
pHam += log(self.prob_ham_mail)
return pSpam >= pHam
#Функция предсказания является ли сообщение спамом или нет
def predict(self, testData):
result = dict()
for (i, message) in enumerate(testData):
processed_message = process_message(message)
result[i] = int(self.classify(processed_message))
return result
#Функция вычисления качества работы алгоритма
def metrics(labels, predictions):
true_pos, true_neg, false_pos, false_neg = 0, 0, 0, 0
for i in range(len(labels)):
true_pos += int(labels[i] == 1 and predictions[i] == 1)
true_neg += int(labels[i] == 0 and predictions[i] == 0)
false_pos += int(labels[i] == 0 and predictions[i] == 1)
false_neg += int(labels[i] == 1 and predictions[i] == 0)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
Fscore = 2 * precision * recall / (precision + recall)
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
return precision, recall, Fscore, accuracy
df = mails
#Обработка сообщений с помощью библиотек
df['message'] = df.message.map(lambda x: x.lower())
df['message'] = df.message.str.replace('[^\w\s]', '')
df['message'] = df['message'].apply(nltk.word_tokenize)
stemmer = PorterStemmer()
df['message'] = df['message'].apply(lambda x: [stemmer.stem(y) for y in x])
df['message'] = df['message'].apply(lambda x: ' '.join(x))
#Преобразование сообщений в таблицу векторов
count_vect = CountVectorizer()
counts = count_vect.fit_transform(df['message'])
transformer = TfidfTransformer().fit(counts)
counts = transformer.transform(counts)
#Разбиение данных на обучающий и тестирующие наборы с использованием библиотек
X_train, X_test, y_train, y_test = train_test_split(counts, df['label'], test_size=0.1, random_state=69)
#Классификация данных с помощью библиотеки scikitlearn
model = MultinomialNB().fit(X_train, y_train)
#Вычисление качества работы алгоритма библиотеки
predicted = model.predict(X_test)
#Интерфейс программы
layout = [
[sg.Button('Обучение'), sg.Button('Показать спам слова'), sg.Button('Показать не спам слова')],
[sg.Text('Введите сообщение для проверки на спамовость')],
[sg.Input(size=(50, 30), key='-IN-')],
[sg.Button('Проверить'), sg.Button('Выход'), sg.Button('Посчитать метрики')]
]
window = sg.Window('Настройка классификатора', layout)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Выход':
break
if event == 'Посчитать метрики':
sc_tf_idf = load()
preds_tf_idf = sc_tf_idf.predict(testData['message'])
precision, recall, Fscore, accuracy = metrics(testData['label'], preds_tf_idf)
sg.popup('Метрики',
"Точность:", precision,
"Полнота:", recall,
"F-мера:", Fscore,
"Численная оценка качества алгоритма:", accuracy,
"Точность классификации для тестового набора данных:", np.mean(predicted == y_test),
"Размер тестовой выборки:", len(y_test),
"Количество легитимных писем попавших в не спам:", confusion_matrix(y_test, predicted)[0][0],
"Количество легитимных писем попавших в спам:", confusion_matrix(y_test, predicted)[0][1],
"Количество спам писем попавших в не спам:", confusion_matrix(y_test, predicted)[1][0],
"Количество спам писем попавших в спам:", confusion_matrix(y_test, predicted)[1][1])
if event == 'Проверить':
text_input = values['-IN-']
pm = process_message(text_input)
sc_tf_idf = load()
sc_tf_idf.classify(pm)
if sc_tf_idf.classify(pm) == True:
sg.popup('Спам')
else:
sg.popup('Не спам')
if event == 'Показать спам слова':
show_spam(spam_words)
if event == 'Показать не спам слова':
show_ham(ham_words)
if event == 'Обучение':
sc_tf_idf = SpamClassifier(trainData, 'tf-idf')
sc_tf_idf.train()
save(sc_tf_idf)
window.close() | слов
def show_spam(spam_words):
spam_wc = WordCloud(width = 512,height = 512).gener | identifier_body |
kek.py | #Импортирование библиотек
import PySimpleGUI as sg
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from math import log
import numpy as np
import pandas as pd
import nltk
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
import pickle
#Функция сохранения состояния обученности классификатора
def save(obj):
with open('sis.pickle', 'wb') as f:
pickle.dump(obj, f)
#Функция загрузки состояния обученности классификатора
def load():
with open('sis.pickle', 'rb') as f:
obj_new = pickle.load(f)
return obj_new
#Функция визуализации словаря спам слов
def show_spam(spam_words):
spam_wc = WordCloud(width = 512,height = 512).generate(spam_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(spam_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
#Функция визуализации словаря легетимных слов
def show_ham(ham_words):
ham_wc = WordCloud(width = 512,height = 512).generate(ham_words)
plt.figure(figsize = (10, 8), facecolor = 'k')
plt.imshow(ham_wc)
plt.axis('off')
plt.tight_layout(pad = 0)
plt.show()
#Чтение данных из таблицы
oldmails = pd.read_csv('spam.csv', encoding = 'latin-1')
oldmails.head()
mailz = pd.read_csv('messages.csv', encoding = 'latin-1')
mailz.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1, inplace = True)
oldmails.head()
mailz.drop(['subject'], axis = 1, inplace = True)
mailz.head()
#Преобразовани таблицы с данными, переименование столбцов
oldmails.rename(columns = {'v1': 'labels', 'v2': 'message'}, inplace = True)
oldmails.head()
oldmails['labels'].value_counts()
mailz['label'].value_counts()
#Преобразовани таблицы с данными, переименование значений столбцов
oldmails['label'] = oldmails['labels'].map({'ham': 0, 'spam': 1})
oldmails.head()
#Преобразовани таблицы с данными, удаление лишних столбцов
oldmails.drop(['labels'], axis = 1, inplace = True)
oldmails.head()
#Преобразовани таблицы с данными, слияние двух массивов для обучения
mails = pd.concat((mailz, oldmails), ignore_index=True)
#Разбиение данных на два массива
totalMails = (int(len(mails))-1)
trainIndex, testIndex = list(), list()
for i in range(mails.shape[0]):
if np.random.uniform(0, 1) < 0.75:
trainIndex += [i]
else:
testIndex += [i]
trainData = mails.loc[trainIndex]
testData = mails.loc[testIndex]
#Отображение данных в таблице
trainData.reset_index(inplace = True)
trainData.drop(['index'], axis = 1, inplace = True)
trainData.head()
testData.reset_index(inplace = True)
testData.drop(['index'], axis = 1, inplace = True)
testData.head()
#Отображение набора тренировочных данных
trainData['label'].value_counts()
#Отображение набора данных для тестирования
testData['label'].value_counts()
#Формирование словрей спам и не спам слов
spam_words = ' '.join(list(mails[mails['label'] == 1]['message']))
ham_words = ' '.join(list(mails[mails['label'] == 0]['message']))
trainData.head()
trainData['label'].value_counts()
testData.head()
testData['label'].value_counts()
#Обработка текста сообщений
def process_message(message, lower_case = True, stem = True, stop_words = True, gram = 2):
if lower_case:
message = message.lower()
words = word_tokenize(message)
words = [w for w in words if len(w) > 2]
if gram > 1:
w = []
for i in range(len(words) - gram + 1):
w += [' '.join(words[i:i + gram])]
return w
if stop_words:
sw = stopwords.words('english')
words = [word for word in words if word not in sw]
if stem:
stemmer = PorterStemmer()
words = [stemmer.stem(word) for word in words]
print(words)
return words
#Классификация данных
class SpamClassifier(object):
def __init__(self, trainData, method='tf-idf'):
self.mails, self.labels = trainData['message'], trainData['label']
self.method = method
#Функция обучения
def train(self):
self.calc_TF_and_IDF()
if self.method == 'tf-idf':
self.calc_TF_IDF()
else:
self.calc_prob()
def calc_prob(self):
self.prob_spam = dict()
self.prob_ham = dict()
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word] + 1) / (self.spam_words + \
len(list(self.tf_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word] + 1) / (self.ham_words + \
len(list(self.tf_ham.keys())))
self.prob_spam_mail, self.prob_ham | = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Вычисление вероятностей
def calc_TF_and_IDF(self):
noOfMessages = self.mails.shape[0]
self.spam_mails, self.ham_mails = self.labels.value_counts()[1], self.labels.value_counts()[0]
self.total_mails = self.spam_mails + self.ham_mails
self.spam_words = 0
self.ham_words = 0
self.tf_spam = dict()
self.tf_ham = dict()
self.idf_spam = dict()
self.idf_ham = dict()
for i in range(noOfMessages):
message_processed = process_message(self.mails[i])
count = list()
for word in message_processed:
if self.labels[i]:
self.tf_spam[word] = self.tf_spam.get(word, 0) + 1
self.spam_words += 1
else:
self.tf_ham[word] = self.tf_ham.get(word, 0) + 1
self.ham_words += 1
if word not in count:
count += [word]
for word in count:
if self.labels[i]:
self.idf_spam[word] = self.idf_spam.get(word, 0) + 1
else:
self.idf_ham[word] = self.idf_ham.get(word, 0) + 1
def calc_TF_IDF(self):
self.prob_spam = dict()
self.prob_ham = dict()
self.sum_tf_idf_spam = 0
self.sum_tf_idf_ham = 0
for word in self.tf_spam:
self.prob_spam[word] = (self.tf_spam[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam[word] + self.idf_ham.get(word, 0)))
self.sum_tf_idf_spam += self.prob_spam[word]
for word in self.tf_spam:
self.prob_spam[word] = (self.prob_spam[word] + 1) / (
self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
for word in self.tf_ham:
self.prob_ham[word] = (self.tf_ham[word]) * log((self.spam_mails + self.ham_mails) \
/ (self.idf_spam.get(word, 0) + self.idf_ham[word]))
self.sum_tf_idf_ham += self.prob_ham[word]
for word in self.tf_ham:
self.prob_ham[word] = (self.prob_ham[word] + 1) / (self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
self.prob_spam_mail, self.prob_ham_mail = self.spam_mails / self.total_mails, self.ham_mails / self.total_mails
#Непосредственно функция классификации на основе теоремы Байеса
def classify(self, processed_message):
pSpam, pHam = 0, 0
for word in processed_message:
if word in self.prob_spam:
pSpam += log(self.prob_spam[word])
else:
if self.method == 'tf-idf':
pSpam -= log(self.sum_tf_idf_spam + len(list(self.prob_spam.keys())))
else:
pSpam -= log(self.spam_words + len(list(self.prob_spam.keys())))
if word in self.prob_ham:
pHam += log(self.prob_ham[word])
else:
if self.method == 'tf-idf':
pHam -= log(self.sum_tf_idf_ham + len(list(self.prob_ham.keys())))
else:
pHam -= log(self.ham_words + len(list(self.prob_ham.keys())))
pSpam += log(self.prob_spam_mail)
pHam += log(self.prob_ham_mail)
return pSpam >= pHam
#Функция предсказания является ли сообщение спамом или нет
def predict(self, testData):
result = dict()
for (i, message) in enumerate(testData):
processed_message = process_message(message)
result[i] = int(self.classify(processed_message))
return result
#Функция вычисления качества работы алгоритма
def metrics(labels, predictions):
true_pos, true_neg, false_pos, false_neg = 0, 0, 0, 0
for i in range(len(labels)):
true_pos += int(labels[i] == 1 and predictions[i] == 1)
true_neg += int(labels[i] == 0 and predictions[i] == 0)
false_pos += int(labels[i] == 0 and predictions[i] == 1)
false_neg += int(labels[i] == 1 and predictions[i] == 0)
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
Fscore = 2 * precision * recall / (precision + recall)
accuracy = (true_pos + true_neg) / (true_pos + true_neg + false_pos + false_neg)
return precision, recall, Fscore, accuracy
df = mails
#Обработка сообщений с помощью библиотек
df['message'] = df.message.map(lambda x: x.lower())
df['message'] = df.message.str.replace('[^\w\s]', '')
df['message'] = df['message'].apply(nltk.word_tokenize)
stemmer = PorterStemmer()
df['message'] = df['message'].apply(lambda x: [stemmer.stem(y) for y in x])
df['message'] = df['message'].apply(lambda x: ' '.join(x))
#Преобразование сообщений в таблицу векторов
count_vect = CountVectorizer()
counts = count_vect.fit_transform(df['message'])
transformer = TfidfTransformer().fit(counts)
counts = transformer.transform(counts)
#Разбиение данных на обучающий и тестирующие наборы с использованием библиотек
X_train, X_test, y_train, y_test = train_test_split(counts, df['label'], test_size=0.1, random_state=69)
#Классификация данных с помощью библиотеки scikitlearn
model = MultinomialNB().fit(X_train, y_train)
#Вычисление качества работы алгоритма библиотеки
predicted = model.predict(X_test)
#Интерфейс программы
layout = [
[sg.Button('Обучение'), sg.Button('Показать спам слова'), sg.Button('Показать не спам слова')],
[sg.Text('Введите сообщение для проверки на спамовость')],
[sg.Input(size=(50, 30), key='-IN-')],
[sg.Button('Проверить'), sg.Button('Выход'), sg.Button('Посчитать метрики')]
]
window = sg.Window('Настройка классификатора', layout)
while True:
event, values = window.read()
if event == sg.WIN_CLOSED or event == 'Выход':
break
if event == 'Посчитать метрики':
sc_tf_idf = load()
preds_tf_idf = sc_tf_idf.predict(testData['message'])
precision, recall, Fscore, accuracy = metrics(testData['label'], preds_tf_idf)
sg.popup('Метрики',
"Точность:", precision,
"Полнота:", recall,
"F-мера:", Fscore,
"Численная оценка качества алгоритма:", accuracy,
"Точность классификации для тестового набора данных:", np.mean(predicted == y_test),
"Размер тестовой выборки:", len(y_test),
"Количество легитимных писем попавших в не спам:", confusion_matrix(y_test, predicted)[0][0],
"Количество легитимных писем попавших в спам:", confusion_matrix(y_test, predicted)[0][1],
"Количество спам писем попавших в не спам:", confusion_matrix(y_test, predicted)[1][0],
"Количество спам писем попавших в спам:", confusion_matrix(y_test, predicted)[1][1])
if event == 'Проверить':
text_input = values['-IN-']
pm = process_message(text_input)
sc_tf_idf = load()
sc_tf_idf.classify(pm)
if sc_tf_idf.classify(pm) == True:
sg.popup('Спам')
else:
sg.popup('Не спам')
if event == 'Показать спам слова':
show_spam(spam_words)
if event == 'Показать не спам слова':
show_ham(ham_words)
if event == 'Обучение':
sc_tf_idf = SpamClassifier(trainData, 'tf-idf')
sc_tf_idf.train()
save(sc_tf_idf)
window.close() | _mail | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.